Dataset columns:

code                      string  (lengths 87 – 55.2k)
code_codestyle            int64   (0 – 349)
style_context             string  (lengths 135 – 49.1k)
style_context_codestyle   int64   (0 – 349)
label                     int64   (0 – 1)
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Any: """simple docstring""" super().__init__() __lowercase = nn.ModuleList(_UpperCAmelCase ) def a__ ( self : Dict , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Union[torch.Tensor, float, int] , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : List[torch.tensor] , _UpperCAmelCase : List[float] , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , ) -> Union[ControlNetOutput, Tuple]: """simple docstring""" for i, (image, scale, controlnet) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase , self.nets ) ): __lowercase , __lowercase = controlnet( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) # merge samples if i == 0: __lowercase , __lowercase = down_samples, mid_sample else: __lowercase = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(_UpperCAmelCase , _UpperCAmelCase ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def a__ ( self : Tuple , _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : bool = True , _UpperCAmelCase : Callable = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , ) -> Optional[int]: """simple docstring""" __lowercase = 0 __lowercase = save_directory for controlnet in self.nets: controlnet.save_pretrained( _UpperCAmelCase , is_main_process=_UpperCAmelCase , save_function=_UpperCAmelCase , safe_serialization=_UpperCAmelCase , variant=_UpperCAmelCase , ) idx += 1 __lowercase = model_path_to_save + f"""_{idx}""" @classmethod def a__ ( cls : Optional[int] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] , **_UpperCAmelCase : Any ) -> Tuple: """simple docstring""" __lowercase = 0 __lowercase = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... __lowercase = pretrained_model_path while os.path.isdir(_UpperCAmelCase ): __lowercase = ControlNetModel.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) controlnets.append(_UpperCAmelCase ) idx += 1 __lowercase = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(_UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.""" ) if len(_UpperCAmelCase ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(_UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(_UpperCAmelCase )
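The row above is a MultiControlNet-style wrapper: each ControlNet in self.nets runs on its own conditioning image and scale, the first net's outputs seed the accumulators, and every later net's residuals are summed element-wise. A toy replay of that merging loop, with hypothetical nets that return plain numbers in place of tensors:

def merge(nets, sample):
    # Each "net" is a hypothetical stand-in returning
    # (down_block_residuals, mid_block_residual).
    down_acc, mid_acc = None, None
    for i, net in enumerate(nets):
        down, mid = net(sample)
        if i == 0:                 # first net seeds the accumulators
            down_acc, mid_acc = list(down), mid
        else:                      # later nets are added element-wise
            down_acc = [a + b for a, b in zip(down_acc, down)]
            mid_acc += mid
    return down_acc, mid_acc

print(merge([lambda s: ([1, 2], 3), lambda s: ([10, 20], 30)], None))
# -> ([11, 22], 33)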
325
from math import isqrt, loga


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: int) -> list[int]:
    __lowercase = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE):
                __lowercase = False
    return [i for i in range(2, SCREAMING_SNAKE_CASE) if is_prime[i]]


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: int = 800800, SCREAMING_SNAKE_CASE: int = 800800) -> int:
    __lowercase = degree * loga(SCREAMING_SNAKE_CASE)
    __lowercase = int(SCREAMING_SNAKE_CASE)
    __lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE)
    __lowercase = 0
    __lowercase = 0
    __lowercase = len(SCREAMING_SNAKE_CASE) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left])
            + prime_numbers[left] * loga(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
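This row is a Project Euler-style count of "hybrid integers" p**q * q**p up to base**degree, done in log space so the huge power never has to be materialized (loga is the obfuscated log2). A quick check of the identity the inner while-loop relies on:

from math import log2

# p**q * q**p <= base**degree  <=>  q*log2(p) + p*log2(q) <= degree*log2(base)
p, q = 2, 3                       # 2**3 * 3**2 = 72
lhs = q * log2(p) + p * log2(q)   # log2(72) ~ 6.17
rhs = 800800 * log2(800800)       # ~ 1.57e7
print(lhs <= rhs)                 # True: (2, 3) is counted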
325
1
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=None ) -> Union[str, Any]: """simple docstring""" super().__init__( _UpperCAmelCase , question_encoder_tokenizer=_UpperCAmelCase , generator_tokenizer=_UpperCAmelCase , index=_UpperCAmelCase , init_retrieval=_UpperCAmelCase , ) __lowercase = None def a__ ( self : Dict , _UpperCAmelCase : int ) -> List[str]: """simple docstring""" logger.info('initializing retrieval' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('dist initialized' ) # needs to be set manually __lowercase = self._infer_socket_ifname() # avoid clash with the NCCL port __lowercase = str(distributed_port + 1 ) __lowercase = dist.new_group(ranks=_UpperCAmelCase , backend='gloo' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('dist not initialized / main' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def a__ ( self : str ) -> str: """simple docstring""" return dist.get_rank(group=self.process_group ) == 0 def a__ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=torch.floataa ) -> Optional[int]: """simple docstring""" __lowercase = torch.empty(_UpperCAmelCase , dtype=_UpperCAmelCase ) dist.scatter(_UpperCAmelCase , src=0 , scatter_list=_UpperCAmelCase , group=self.process_group ) return target_tensor def a__ ( self : str ) -> Optional[int]: """simple docstring""" __lowercase = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __lowercase = next((addr for addr in addrs if addr.startswith('e' )) , _UpperCAmelCase ) return ifname def a__ ( self : Union[str, Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]: """simple docstring""" if not dist.is_initialized(): __lowercase , __lowercase = self._main_retrieve(_UpperCAmelCase , _UpperCAmelCase ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_UpperCAmelCase ) # distributed training __lowercase = dist.get_world_size(group=self.process_group ) # gather logic __lowercase = None if self._is_main(): __lowercase = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(_UpperCAmelCase )] dist.gather(torch.tensor(_UpperCAmelCase ) , dst=0 , gather_list=_UpperCAmelCase , group=self.process_group ) # scatter logic __lowercase = question_hidden_states.shape[0] __lowercase = [] __lowercase = [] if self._is_main(): assert len(_UpperCAmelCase ) == world_size __lowercase , __lowercase = self._main_retrieve(torch.cat(_UpperCAmelCase ).numpy() , _UpperCAmelCase ) __lowercase , __lowercase = torch.tensor(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase ) __lowercase = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self._chunk_tensor(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self._scattered(_UpperCAmelCase , [n_queries, n_docs] , 
target_type=torch.intaa ) __lowercase = self._scattered(_UpperCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_UpperCAmelCase )
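The row ending here is a distributed RAG retriever: worker ranks send their query embeddings to the main rank (gather), the main rank performs a single index lookup, and each worker gets back its own slice (scatter). A toy sketch of that round-trip with plain lists standing in for the torch.distributed collectives (the ranks, queries, and retrieval function here are hypothetical):

workers = {0: [0.1, 0.2], 1: [0.3], 2: [0.4, 0.5]}  # per-rank query batches
gathered = [q for rank in sorted(workers) for q in workers[rank]]  # "gather" on rank 0
results = [round(q * 10) for q in gathered]                        # one retrieval pass
out, i = {}, 0
for rank in sorted(workers):                                       # "scatter" back
    n = len(workers[rank])
    out[rank] = results[i:i + n]
    i += n
print(out)  # {0: [1, 2], 1: [3], 2: [4, 5]}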
325
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
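The token-level expectations in the row above rest on the SentencePiece convention that '▁' (SPIECE_UNDERLINE) marks the start of a word, so detokenization is just concatenation plus replacing the marker with a space:

SPIECE_UNDERLINE = "\u2581"  # '▁', the word-boundary marker asserted in the test
tokens = ["\u2581This", "\u2581is", "\u2581a", "\u2581t", "est"]
print("".join(tokens).replace(SPIECE_UNDERLINE, " ").strip())  # This is a test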
325
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: if isinstance(SCREAMING_SNAKE_CASE , collections.abc.Iterable ): return x return (x, x) @require_tf class A__ : def a__ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Tuple: """simple docstring""" pass def a__ ( self : List[str] ) -> List[str]: """simple docstring""" pass def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" pass def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : List[str] ) -> Tuple: """simple docstring""" __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(_UpperCAmelCase ) __lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def a__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase ) __lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Any=None , **_UpperCAmelCase : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = {'vision_model': vision_model, 'text_model': text_model} __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCAmelCase ) __lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase ) __lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) __lowercase = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase ) __lowercase = model(input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) __lowercase = after_output[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_UpperCAmelCase , 1e-5 ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase ) __lowercase = model( input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_UpperCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def a__ ( self : Dict , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> Optional[Any]: """simple docstring""" __lowercase = np.abs((a - b) ).max() self.assertLessEqual(_UpperCAmelCase , _UpperCAmelCase , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_UpperCAmelCase ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_UpperCAmelCase ) def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_save_load(**_UpperCAmelCase ) def a__ ( self : Tuple ) -> 
Optional[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_UpperCAmelCase ) @slow def a__ ( self : Any ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_pretrained_model_and_inputs() __lowercase = model_a(**_UpperCAmelCase ) __lowercase = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel.from_pretrained(_UpperCAmelCase ) __lowercase = model_a(**_UpperCAmelCase ) __lowercase = after_outputs[0].numpy() __lowercase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_UpperCAmelCase , 1e-5 ) @require_tf class A__ ( lowerCAmelCase__ , unittest.TestCase ): def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def a__ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" __lowercase = TFViTModel(_UpperCAmelCase , name='vision_model' ) __lowercase = TFBertModel(_UpperCAmelCase , name='text_model' ) return vision_model, text_model def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = TFViTModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( lowerCAmelCase__ , unittest.TestCase ): def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def a__ ( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=None , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.get_vision_text_model(_UpperCAmelCase , 
_UpperCAmelCase ) __lowercase = TFVisionTextDualEncoderModel(vision_model=_UpperCAmelCase , text_model=_UpperCAmelCase ) __lowercase = model( input_ids=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase ) __lowercase = output.vision_model_output.attentions self.assertEqual(len(_UpperCAmelCase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __lowercase = to_atuple(vision_model.config.image_size ) __lowercase = to_atuple(vision_model.config.patch_size ) __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __lowercase = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __lowercase = output.text_model_output.attentions self.assertEqual(len(_UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ) -> int: """simple docstring""" __lowercase = TFDeiTModel(_UpperCAmelCase , name='vision_model' ) __lowercase = TFRobertaModel(_UpperCAmelCase , name='text_model' ) return vision_model, text_model def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = TFDeiTModelTester(self ) __lowercase = TFRobertaModelTester(self ) __lowercase = vit_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class A__ ( lowerCAmelCase__ , unittest.TestCase ): def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained( 'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' ) __lowercase = 13 __lowercase = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __lowercase = random_attention_mask([batch_size, 4] ) __lowercase = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> Tuple: """simple docstring""" __lowercase = TFCLIPVisionModel(_UpperCAmelCase , name='vision_model' ) __lowercase = TFBertModel(_UpperCAmelCase , name='text_model' ) return vision_model, text_model def a__ ( self : int ) -> Any: """simple docstring""" __lowercase = TFCLIPVisionModelTester(self ) __lowercase = TFBertModelTester(self ) __lowercase = clip_model_tester.prepare_config_and_inputs() __lowercase = bert_model_tester.prepare_config_and_inputs() __lowercase , __lowercase = vision_config_and_inputs ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) 
, ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = TFVisionTextDualEncoderModel.from_pretrained( 'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_UpperCAmelCase ) __lowercase = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) __lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) __lowercase = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' ) __lowercase = model(**_UpperCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __lowercase = np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _UpperCAmelCase , atol=1e-3 ) )
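The attention-shape assertions in the row above depend on standard ViT sequence-length arithmetic: patches per side multiplied together, plus one [CLS] token (the DeiT check in this same row uses patches + 2: [CLS] plus a distillation token). Spelled out:

image_size, patch_size = (224, 224), (16, 16)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
print(num_patches + 1)  # 197: ViT seq_len = patches + [CLS]
print(num_patches + 2)  # 198: DeiT seq_len = patches + [CLS] + distillation token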
325
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "layoutlmv3" def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) __lowercase = max_ad_position_embeddings __lowercase = coordinate_size __lowercase = shape_size __lowercase = has_relative_attention_bias __lowercase = rel_pos_bins __lowercase = max_rel_pos __lowercase = has_spatial_attention_bias __lowercase = rel_ad_pos_bins __lowercase = max_rel_ad_pos __lowercase = text_embed __lowercase = visual_embed __lowercase = input_size __lowercase = num_channels __lowercase = patch_size __lowercase = classifier_dropout class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = version.parse("1.12" ) @property def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def a__ ( self : int ) -> float: """simple docstring""" 
return 1e-5 @property def a__ ( self : str ) -> int: """simple docstring""" return 12 def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowercase = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = dict( processor( _UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) ) return inputs
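The ONNX dummy-input generator above maps a dynamic axis (-1) to a small fixed size so the exporter cannot over-specialize, reserving room for special tokens on the sequence axis. A sketch of the assumed behavior of compute_effective_axis_dimension (the real helper lives in transformers.onnx.utils; treat this as an approximation, not its verbatim source):

def effective_axis(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # Assumed semantics: <= 0 (dynamic axis) falls back to the fixed default,
    # then space for special tokens is subtracted.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

print(effective_axis(-1, 8, num_token_to_add=2))  # 6
print(effective_axis(16, 8, num_token_to_add=2))  # 14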
325
1
from __future__ import annotations


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: int = 4) -> list[list[int]]:
    __lowercase = abs(SCREAMING_SNAKE_CASE) or 4
    return [[1 + x + y * row_size for x in range(SCREAMING_SNAKE_CASE)] for y in range(SCREAMING_SNAKE_CASE)]


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(SCREAMING_SNAKE_CASE))  # OR.. transpose(reverse_column(matrix))


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(SCREAMING_SNAKE_CASE))  # OR.. reverse_column(reverse_row(matrix))


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(SCREAMING_SNAKE_CASE))  # OR.. transpose(reverse_row(matrix))


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    __lowercase = [list(SCREAMING_SNAKE_CASE) for x in zip(*SCREAMING_SNAKE_CASE)]
    return matrix


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    __lowercase = matrix[::-1]
    return matrix


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> list[list[int]]:
    __lowercase = [x[::-1] for x in matrix]
    return matrix


def __SCREAMING_SNAKE_CASE(SCREAMING_SNAKE_CASE: list[list[int]]) -> None:
    for i in matrix:
        print(*SCREAMING_SNAKE_CASE)


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 90 counterclockwise:\n""")
    print_matrix(rotate_aa(matrix))
    SCREAMING_SNAKE_CASE__ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 180:\n""")
    print_matrix(rotate_aaa(matrix))
    SCREAMING_SNAKE_CASE__ = make_matrix()
    print("""\norigin:\n""")
    print_matrix(matrix)
    print("""\nrotate 270 counterclockwise:\n""")
    print_matrix(rotate_aaa(matrix))
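Each rotation above is composed from transpose, reverse_row, and reverse_column. A quick sanity check of the 90° counterclockwise identity (transpose, then reverse the row order):

m = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*m)]  # [[1, 3], [2, 4]]
print(transposed[::-1])                      # [[2, 4], [1, 3]] == m rotated 90° CCW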
325
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
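The RegNetSELayer in the row above is a standard squeeze-and-excitation block; with the obfuscated digits restored (Convad -> Conv2d, AdaptiveAvgPoolad -> AdaptiveAvgPool2d, and so on) it reduces to global pooling, a two-convolution bottleneck, and a sigmoid gate. A minimal runnable sketch of that gating pattern, not a drop-in for the class itself:

import torch
from torch import nn

class SqueezeExcite(nn.Module):
    def __init__(self, channels: int, reduced: int):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d((1, 1))      # squeeze: global context per channel
        self.attn = nn.Sequential(
            nn.Conv2d(channels, reduced, kernel_size=1), nn.ReLU(),
            nn.Conv2d(reduced, channels, kernel_size=1), nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * self.attn(self.pool(x))            # excite: per-channel gating

print(SqueezeExcite(8, 2)(torch.randn(1, 8, 4, 4)).shape)  # torch.Size([1, 8, 4, 4])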
325
1
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : str = GPTaTokenizer lowerCAmelCase__ : List[Any] = GPTaTokenizerFast lowerCAmelCase__ : Tuple = True lowerCAmelCase__ : Dict = {"add_prefix_space": True} lowerCAmelCase__ : str = False def a__ ( self : Dict ) -> Optional[int]: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __lowercase = {'unk_token': '<unk>'} __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) __lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_UpperCAmelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_UpperCAmelCase ) ) def a__ ( self : Optional[int] , **_UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a__ ( self : List[Any] , **_UpperCAmelCase : Any ) -> List[str]: """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def a__ ( self : Any , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = 'lower newer' __lowercase = 'lower newer' return input_text, output_text def a__ ( self : int ) -> List[str]: """simple docstring""" __lowercase = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowercase = 'lower newer' __lowercase = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] __lowercase = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = tokens + [tokenizer.unk_token] __lowercase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" if not self.test_rust_tokenizer: return __lowercase = self.get_tokenizer() __lowercase = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) __lowercase = 'lower newer' # Testing tokenization __lowercase = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __lowercase = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids without special tokens __lowercase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __lowercase = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing conversion to ids with special tokens __lowercase = 
self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase ) __lowercase = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase ) __lowercase = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) # Testing the unknown token __lowercase = tokens + [rust_tokenizer.unk_token] __lowercase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Dict , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" pass def a__ ( self : Optional[Any] , _UpperCAmelCase : Tuple=15 ) -> Any: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowercase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # Simple input __lowercase = 'This is a simple input' __lowercase = ['This is a simple input 1', 'This is a simple input 2'] __lowercase = ('This is a simple input', 'This is a pair') __lowercase = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' ) # Simple input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' ) # Simple input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' ) # Pair input self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' ) # Pair input self.assertRaises( _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , ) def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input __lowercase = 'This is a simple input' __lowercase = ['This is a simple input looooooooong', 'This is a simple input'] __lowercase = ('This is a simple input', 'This is a pair') __lowercase = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] __lowercase = tokenizer.pad_token_id __lowercase = tokenizer(_UpperCAmelCase , padding='max_length' , max_length=30 , return_tensors='np' ) __lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' ) __lowercase = tokenizer(*_UpperCAmelCase , padding='max_length' , max_length=60 , return_tensors='np' ) __lowercase = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice 
does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def a__ ( self : Optional[int] ) -> int: """simple docstring""" __lowercase = '$$$' __lowercase = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase ) __lowercase = 'This is a simple input' __lowercase = ['This is a simple input 1', 'This is a simple input 2'] __lowercase = tokenizer.bos_token_id __lowercase = tokenizer(_UpperCAmelCase ) __lowercase = tokenizer(_UpperCAmelCase ) self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __lowercase = tokenizer.decode(out_s.input_ids ) __lowercase = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , _UpperCAmelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" pass def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = [self.get_tokenizer(do_lower_case=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )] for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): __lowercase = 'Encode this.' __lowercase = 'This one too please.' 
__lowercase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) encoded_sequence += tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) __lowercase = tokenizer.encode_plus( _UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , ) __lowercase = encoded_sequence_dict['input_ids'] __lowercase = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) __lowercase = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase ) ] __lowercase = [x for x in filtered_sequence if x is not None] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) @require_tokenizers class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase ) __lowercase = 'A photo of a cat' __lowercase = tokenizer.encode( _UpperCAmelCase , ) self.assertEqual(_UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('test_opt' ) __lowercase = AutoTokenizer.from_pretrained('./test_opt' ) __lowercase = tokenizer.encode( _UpperCAmelCase , ) self.assertEqual(_UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=_UpperCAmelCase ) __lowercase = 'A photo of a cat' __lowercase = tokenizer.encode( _UpperCAmelCase , ) # Same as above self.assertEqual(_UpperCAmelCase , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase ) __lowercase = 'bos' __lowercase = tokenizer.get_vocab()['bos'] __lowercase = 'A photo of a cat' __lowercase = tokenizer.encode( _UpperCAmelCase , ) # We changed the bos token self.assertEqual(_UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('./tok' ) __lowercase = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) __lowercase = tokenizer.encode( _UpperCAmelCase , ) self.assertEqual(_UpperCAmelCase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
325
from __future__ import annotations


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
325
1
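The tokenizer test above builds a toy BPE vocabulary on disk; a self-contained sketch of the same round trip, assuming transformers is installed (the expected tokens come from the test itself):

import json
import os
import tempfile

from transformers import GPT2Tokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
         "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
         "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tok = GPT2Tokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tok.tokenize("lower newer", add_prefix_space=True))
# expected, per the test: ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']

And a quick usage sketch of the grid path-cost routine above, rewritten standalone with a hypothetical name (the original mutates its input in place):

def min_path_sum(matrix: list[list[int]]) -> int:
    # Same dynamic programme: fold row/column prefixes, then take the
    # cheaper of the cell above or the cell to the left.
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7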
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ) -> bool:
        __lowercase = sqrt((x**2) + (y**2) )
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    __lowercase = mean(
        int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(SCREAMING_SNAKE_CASE ) )
    # The ratio of the area for circle to square is pi/4.
    __lowercase = proportion * 4
    print(F"""The estimated value of pi is {pi_estimate}""" )
    print(F"""The exact value of pi is {pi}""" )
    print(F"""The total error is {abs(pi - pi_estimate )}""" )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Callable[[float], float] , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 , ) -> float:
    return mean(
        function_to_integrate(uniform(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) for _ in range(SCREAMING_SNAKE_CASE ) ) * (max_value - min_value)


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : float = 1.0 ) -> None:
    def identity_function(SCREAMING_SNAKE_CASE : float ) -> float:
        return x

    __lowercase = area_under_curve_estimator(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    __lowercase = (max_value * max_value - min_value * min_value) / 2
    print('******************' )
    print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {expected_value}""" )
    print(F"""Total error is {abs(estimated_value - expected_value )}""" )
    print('******************' )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> None:
    def function_to_integrate(SCREAMING_SNAKE_CASE : float ) -> float:
        return sqrt(4.0 - x * x )

    __lowercase = area_under_curve_estimator(
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 0.0 , 2.0 )
    print('******************' )
    print('Estimating pi using area_under_curve_estimator' )
    print(F"""Estimated value is {estimated_value}""" )
    print(F"""Expected value is {pi}""" )
    print(F"""Total error is {abs(estimated_value - pi )}""" )
    print('******************' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
325
import enum
import os
from hashlib import shaaaa
from typing import Optional

from .. import config
from .logging import get_logger


SCREAMING_SNAKE_CASE__ = get_logger(__name__)


class A__ ( enum.Enum ):
    lowerCAmelCase__ : Dict = "all_checks"
    lowerCAmelCase__ : List[Any] = "basic_checks"
    lowerCAmelCase__ : Dict = "no_checks"


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    __lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    __lowercase = ' for ' + verification_name if verification_name is not None else ''
    if len(SCREAMING_SNAKE_CASE ) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict ) -> Optional[int]:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    __lowercase = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(SCREAMING_SNAKE_CASE ) > 0:
        raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) )
    logger.info('All the splits matched successfully.' )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = True ) -> dict:
    if record_checksum:
        __lowercase = shaaaa()
        with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(SCREAMING_SNAKE_CASE )
        __lowercase = m.hexdigest()
    else:
        __lowercase = None
    return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum}


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
325
1
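A standalone sketch of the dart-throwing pi estimate above; estimate_pi is a hypothetical name:

from math import pi, sqrt
from random import uniform
from statistics import mean

def estimate_pi(iterations: int) -> float:
    # Fraction of uniform points in [-1, 1]^2 inside the unit circle,
    # scaled by 4 (the circle-to-square area ratio is pi/4).
    inside = mean(
        int(sqrt(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2) <= 1)
        for _ in range(iterations)
    )
    return inside * 4

estimate = estimate_pi(100_000)
print(estimate, abs(pi - estimate))

And the size/checksum record builder from the second snippet, rewritten with explicit names on the assumption that the obfuscated shaaaa is hashlib's sha256:

import hashlib
import os
import tempfile

def size_and_checksum(path: str, record_checksum: bool = True) -> dict:
    # Stream the file through sha256 in 1 MiB chunks, as above.
    checksum = None
    if record_checksum:
        m = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello")
print(size_and_checksum(f.name))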
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = """▁""" SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """sentencepiece.bpe.model"""} SCREAMING_SNAKE_CASE__ = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), } } SCREAMING_SNAKE_CASE__ = { """facebook/mbart-large-en-ro""": 1024, """facebook/mbart-large-cc25""": 1024, } # fmt: off SCREAMING_SNAKE_CASE__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : Any = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Tuple = ["input_ids", "attention_mask"] lowerCAmelCase__ : List[int] = [] lowerCAmelCase__ : List[int] = [] def __init__( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple="<s>" , _UpperCAmelCase : List[str]="</s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : Tuple="<pad>" , _UpperCAmelCase : Optional[int]="<mask>" , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : int=None , **_UpperCAmelCase : List[Any] , ) -> Tuple: """simple docstring""" __lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCAmelCase ) ) __lowercase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase = 1 __lowercase = len(self.sp_model ) __lowercase = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase ) } __lowercase = {v: k for k, v in self.lang_code_to_id.items()} __lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __lowercase = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __lowercase = src_lang if src_lang is not None else 'en_XX' __lowercase = self.lang_code_to_id[self._src_lang] __lowercase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : List[Any] ) -> int: """simple docstring""" __lowercase = self.__dict__.copy() __lowercase = None __lowercase = self.sp_model.serialized_model_proto() return state def __setstate__( self : str , _UpperCAmelCase : List[str] ) -> int: """simple docstring""" __lowercase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase = {} __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def a__ ( self : Optional[int] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a__ ( self : int , _UpperCAmelCase : str ) -> None: """simple docstring""" __lowercase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) __lowercase = [1] * len(self.prefix_tokens ) __lowercase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def a__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a__ ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , 
_UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : Tuple ) -> Any: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __lowercase = src_lang __lowercase = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) __lowercase = self.convert_tokens_to_ids(_UpperCAmelCase ) __lowercase = tgt_lang_id return inputs def a__ ( self : str ) -> List[str]: """simple docstring""" __lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a__ ( self : Dict , _UpperCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def a__ ( self : List[Any] , _UpperCAmelCase : Tuple ) -> Optional[int]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase = self.sp_model.PieceToId(_UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def a__ ( self : Any , _UpperCAmelCase : Optional[int] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def a__ ( self : int , _UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" __lowercase = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip() return out_string def a__ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , 'wb' ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def a__ ( self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "en_XX" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "ro_RO" , **_UpperCAmelCase : Tuple , ) -> BatchEncoding: """simple docstring""" __lowercase = src_lang __lowercase = tgt_lang return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a__ ( self : Any , _UpperCAmelCase : List[Any] ) -> None: """simple docstring""" __lowercase = self.lang_code_to_id[src_lang] __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code] def a__ ( self : Tuple , _UpperCAmelCase : str ) -> None: """simple docstring""" __lowercase = self.lang_code_to_id[lang] __lowercase = [] __lowercase = [self.eos_token_id, self.cur_lang_code]
325
import math


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
    assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    __lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
    __lowercase = factor * value
    __lowercase = value
    while not is_prime(SCREAMING_SNAKE_CASE ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
    return value
325
1
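The token-to-id path in the MBart tokenizer above shifts SentencePiece ids by a fairseq offset and maps id 0 to the unknown token; a standalone sketch of that rule with hypothetical names:

def sp_id_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    # SentencePiece returns 0 for unknown pieces; every other id is shifted
    # so the first slots stay reserved for <s>, <pad>, </s>, <unk>.
    return spm_id + fairseq_offset if spm_id else unk_token_id

print(sp_id_to_fairseq_id(0))   # 3  -> <unk>
print(sp_id_to_fairseq_id(42))  # 43

And a compact restatement of the trial-division primality check above, with the assert message tidied:

from math import sqrt

def is_prime(number: int) -> bool:
    # Trial division over odd candidates up to sqrt(number), as above.
    assert isinstance(number, int) and number >= 0, "'number' must be a non-negative int"
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0:
        return False
    return not any(number % i == 0 for i in range(3, int(sqrt(number)) + 1, 2))

print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]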
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """MIT/ast-finetuned-audioset-10-10-0.4593""": (
        """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
    ),
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : int = "audio-spectrogram-transformer"

    def __init__( self : Dict , _UpperCAmelCase : Union[str, Any]=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : Optional[Any]=30_72 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=1e-1_2 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : Tuple=10 , _UpperCAmelCase : List[Any]=10_24 , _UpperCAmelCase : Optional[int]=1_28 , **_UpperCAmelCase : List[Any] , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**_UpperCAmelCase )
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = initializer_range
        __lowercase = layer_norm_eps
        __lowercase = patch_size
        __lowercase = qkv_bias
        __lowercase = frequency_stride
        __lowercase = time_stride
        __lowercase = max_length
        __lowercase = num_mel_bins
325
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = 
processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 
SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
1
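Assuming the configuration class above corresponds to transformers' ASTConfig, a minimal instantiation sketch:

from transformers import ASTConfig

config = ASTConfig(frequency_stride=10, time_stride=10, max_length=1024, num_mel_bins=128)
print(config.hidden_size, config.patch_size)  # 768 16 with the defaults above

And a hedged sketch of the mask post-processing the SAM tests exercise, assuming the public facebook/sam-vit-base checkpoint is reachable:

import torch
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
low_res_masks = [torch.ones((1, 3, 256, 256))]
masks = processor.post_process_masks(
    low_res_masks, original_sizes=[[1764, 2646]], reshaped_input_sizes=[[683, 1024]]
)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646]), matching the test above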
import unittest

from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
    lowerCAmelCase__ : Any = None
    lowerCAmelCase__ : Any = BloomTokenizerFast
    lowerCAmelCase__ : Union[str, Any] = BloomTokenizerFast
    lowerCAmelCase__ : List[str] = True
    lowerCAmelCase__ : Optional[Any] = False
    lowerCAmelCase__ : Dict = "tokenizer_file"
    lowerCAmelCase__ : str = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def a__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        super().setUp()
        __lowercase = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )

    def a__ ( self : List[Any] , **_UpperCAmelCase : List[str] ) -> Dict:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    def a__ ( self : List[str] ) -> Dict:
        """simple docstring"""
        __lowercase = self.get_rust_tokenizer()
        __lowercase = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        __lowercase = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
        __lowercase = tokenizer.batch_encode_plus(_UpperCAmelCase )['input_ids']
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        __lowercase = tokenizer.batch_decode(_UpperCAmelCase )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    def a__ ( self : Tuple , _UpperCAmelCase : List[str]=6 ) -> List[Any]:
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                __lowercase = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                __lowercase = 'This is a simple input'
                __lowercase = ['This is a simple input 1', 'This is a simple input 2']
                __lowercase = ('This is a simple input', 'This is a pair')
                __lowercase = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.encode(_UpperCAmelCase , max_length=_UpperCAmelCase )
                    tokenizer_r.batch_encode_plus(_UpperCAmelCase , max_length=_UpperCAmelCase )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                __lowercase = None  # Hotfixing padding = None
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Simple input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Simple input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Pair input
                self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
                # Pair input
                self.assertRaises(
                    _UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )

    def a__ ( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = self.get_rust_tokenizer()
        __lowercase = load_dataset('xnli' , 'all_languages' , split='test' , streaming=_UpperCAmelCase )
        __lowercase = next(iter(_UpperCAmelCase ) )['premise']  # pick up one data
        __lowercase = list(sample_data.values() )
        __lowercase = list(map(tokenizer.encode , _UpperCAmelCase ) )
        __lowercase = [tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

    def a__ ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
325
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


SCREAMING_SNAKE_CASE__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
1
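A round-trip sketch matching the Bloom test above, assuming the bigscience/tokenizer files can be downloaded (the expected ids come from the test itself):

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
ids = tok.batch_encode_plus(["The quick brown fox</s>"])["input_ids"]
print(ids)                    # [[2175, 23714, 73173, 144252, 2]] per the test
print(tok.batch_decode(ids))  # ['The quick brown fox</s>']

And a stripped-down sketch of the lazy-import pattern the package init above relies on, with hypothetical names:

import importlib
import sys
from types import ModuleType

class LazyModule(ModuleType):
    # Resolve attributes on first access instead of importing eagerly,
    # mirroring what _LazyModule does for the package init above.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, item: str):
        for module, symbols in self._import_structure.items():
            if item in symbols:
                return getattr(importlib.import_module(module), item)
        raise AttributeError(item)

sys.modules["lazy_demo"] = LazyModule("lazy_demo", {"json": ["dumps"]})
import lazy_demo
print(lazy_demo.dumps({"a": 1}))  # json is only imported at this point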
import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[Any] = KandinskyVaaPriorPipeline lowerCAmelCase__ : Optional[int] = ["prompt"] lowerCAmelCase__ : Union[str, Any] = ["prompt", "negative_prompt"] lowerCAmelCase__ : Tuple = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] lowerCAmelCase__ : int = False @property def a__ ( self : Tuple ) -> int: """simple docstring""" return 32 @property def a__ ( self : Dict ) -> Tuple: """simple docstring""" return 32 @property def a__ ( self : List[Any] ) -> Dict: """simple docstring""" return self.time_input_dim @property def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return self.time_input_dim * 4 @property def a__ ( self : Dict ) -> Dict: """simple docstring""" return 1_00 @property def a__ ( self : List[str] ) -> Any: """simple docstring""" __lowercase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) __lowercase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(_UpperCAmelCase ) @property def a__ ( self : int ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) __lowercase = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __lowercase = PriorTransformer(**_UpperCAmelCase ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __lowercase = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def a__ ( self : Tuple ) -> str: """simple docstring""" torch.manual_seed(0 ) __lowercase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_24 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __lowercase = CLIPVisionModelWithProjection(_UpperCAmelCase ) return model @property def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_24 , ) return image_processor def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.dummy_prior __lowercase = self.dummy_image_encoder __lowercase = self.dummy_text_encoder __lowercase = self.dummy_tokenizer __lowercase = self.dummy_image_processor __lowercase 
= UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=_UpperCAmelCase , clip_sample_range=10.0 , ) __lowercase = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any]=0 ) -> str: """simple docstring""" if str(_UpperCAmelCase ).startswith('mps' ): __lowercase = torch.manual_seed(_UpperCAmelCase ) else: __lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) __lowercase = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = 'cpu' __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**_UpperCAmelCase ) __lowercase = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __lowercase = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) __lowercase = output.image_embeds __lowercase = pipe( **self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0] __lowercase = image[0, -10:] __lowercase = image_from_tuple[0, -10:] assert image.shape == (1, 32) __lowercase = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = torch_device == 'cpu' __lowercase = True __lowercase = False self._test_inference_batch_single_identical( test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , ) @skip_mps def a__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowercase = torch_device == 'cpu' __lowercase = False self._test_attention_slicing_forward_pass( test_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
325
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
    lowerCAmelCase__ : int = ["mems"]
    lowerCAmelCase__ : Dict = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple:
        """simple docstring"""
        __lowercase = vocab_size
        __lowercase = []
        self.cutoffs.extend(_UpperCAmelCase )
        if proj_share_all_but_first:
            __lowercase = [False] + [True] * len(self.cutoffs )
        else:
            __lowercase = [False] + [False] * len(self.cutoffs )
        __lowercase = d_model
        __lowercase = d_embed
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = div_val
        __lowercase = pre_lnorm
        __lowercase = n_layer
        __lowercase = n_head
        __lowercase = mem_len
        __lowercase = same_length
        __lowercase = attn_type
        __lowercase = clamp_len
        __lowercase = sample_softmax
        __lowercase = adaptive
        __lowercase = dropout
        __lowercase = dropatt
        __lowercase = untie_r
        __lowercase = init
        __lowercase = init_range
        __lowercase = proj_init_std
        __lowercase = init_std
        __lowercase = layer_norm_epsilon
        super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a__ ( self : Tuple ) -> Any:
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
        """simple docstring"""
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
325
1
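A hedged usage sketch of the prior pipeline the tests above cover, assuming the public kandinsky-community/kandinsky-2-2-prior checkpoint and a diffusers version that ships KandinskyV22PriorPipeline:

import torch
from diffusers import KandinskyV22PriorPipeline

pipe = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
generator = torch.Generator().manual_seed(0)
out = pipe("horse", num_inference_steps=2, guidance_scale=4.0, generator=generator, output_type="np")
print(out.image_embeds.shape)  # one CLIP image embedding per prompt; negative_image_embeds is also returned

And a small demonstration of the attribute_map aliasing in the Transformer-XL config above, assuming a transformers version that still includes TransfoXLConfig:

from transformers import TransfoXLConfig

config = TransfoXLConfig(cutoffs=[20000, 40000, 200000], mem_len=1600)
# attribute_map resolves legacy names: hidden_size -> d_model, num_hidden_layers -> n_layer, etc.
print(config.hidden_size == config.d_model, config.num_hidden_layers == config.n_layer)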
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) torch.set_grad_enabled(False) SCREAMING_SNAKE_CASE__ = """cuda""" if torch.cuda.is_available() else """cpu""" def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any]=100 , SCREAMING_SNAKE_CASE : List[Any]=" " ) -> List[str]: __lowercase = text.split(SCREAMING_SNAKE_CASE ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict ) -> dict: __lowercase , __lowercase = [], [] for title, text in zip(documents['title'] , documents['text'] ): if text is not None: for passage in split_text(SCREAMING_SNAKE_CASE ): titles.append(title if title is not None else '' ) texts.append(SCREAMING_SNAKE_CASE ) return {"title": titles, "text": texts} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : DPRContextEncoder , SCREAMING_SNAKE_CASE : DPRContextEncoderTokenizerFast ) -> dict: __lowercase = ctx_tokenizer( documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )['input_ids'] __lowercase = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE ) , return_dict=SCREAMING_SNAKE_CASE ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : "RagExampleArguments" , SCREAMING_SNAKE_CASE : "ProcessingArguments" , SCREAMING_SNAKE_CASE : "IndexHnswArguments" , ) -> List[str]: ###################################### logger.info('Step 1 - Create the dataset' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way __lowercase = load_dataset( 'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words __lowercase = dataset.map(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , num_proc=processing_args.num_proc ) # And compute the embeddings __lowercase = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE ) __lowercase = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) __lowercase = Features( {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space __lowercase = dataset.map( partial(SCREAMING_SNAKE_CASE , ctx_encoder=SCREAMING_SNAKE_CASE , ctx_tokenizer=SCREAMING_SNAKE_CASE ) , batched=SCREAMING_SNAKE_CASE , 
batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE , ) # And finally save your dataset __lowercase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' ) dataset.save_to_disk(SCREAMING_SNAKE_CASE ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('Step 2 - Index the dataset' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search __lowercase = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE ) # And save the index __lowercase = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' ) dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class A__ : lowerCAmelCase__ : str = field( default=str(Path(lowerCAmelCase__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) lowerCAmelCase__ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) lowerCAmelCase__ : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) lowerCAmelCase__ : Optional[str] = field( default=str(Path(lowerCAmelCase__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class A__ : lowerCAmelCase__ : Optional[int] = field( default=lowerCAmelCase__ , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) lowerCAmelCase__ : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class A__ : lowerCAmelCase__ : int = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) lowerCAmelCase__ : int = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) SCREAMING_SNAKE_CASE__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
325
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
    for attribute in key.split('.' ):
        __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    if weight_type is not None:
        __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
    else:
        __lowercase = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        __lowercase = value
    elif weight_type == "weight_g":
        __lowercase = value
    elif weight_type == "weight_v":
        __lowercase = value
    elif weight_type == "bias":
        __lowercase = value
    else:
        __lowercase = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
    __lowercase = []
    __lowercase = fairseq_model.state_dict()

    __lowercase = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    __lowercase = None

    for name, value in fairseq_dict.items():
        __lowercase = False
        if "conv_layers" in name:
            load_conv_layer(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
            __lowercase = True
        elif name.split('.' )[0] == "proj":
            __lowercase = fairseq_model.proj
            __lowercase = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    __lowercase = True
                    if "*" in mapped_key:
                        __lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
                        __lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
                    if "weight_g" in name:
                        __lowercase = 'weight_g'
                    elif "weight_v" in name:
                        __lowercase = 'weight_v'
                    elif "bias" in name:
                        __lowercase = 'bias'
                    elif "weight" in name:
                        __lowercase = 'weight'
                    else:
                        __lowercase = None
                    set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                continue
        if not is_used:
            unused_weights.append(SCREAMING_SNAKE_CASE )

    logger.warning(f"""Unused weights: {unused_weights}""" )

    return proj_weight


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
    __lowercase = full_name.split('conv_layers.' )[-1]
    __lowercase = name.split('.' )
    __lowercase = int(items[0] )
    __lowercase = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __lowercase = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __lowercase = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(SCREAMING_SNAKE_CASE )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
    __lowercase , __lowercase = emb.weight.shape
    __lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
    __lowercase = emb.weight.data
    return lin_layer


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
    with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
        __lowercase = f.readlines()
        __lowercase = [line.split(' ' )[0] for line in lines]

    __lowercase = len(SCREAMING_SNAKE_CASE )

    __lowercase = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
    return vocab_dict


@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
    __lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
    __lowercase = SpeechaTextaConfig.from_pretrained(
        SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )

    __lowercase = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )

    __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    __lowercase = model[0].eval()

    # set weights for wav2vec2 encoder
    __lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE )
    __lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )

    __lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
    __lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )

    # set output linear layer
    unexpected_keys.remove('embed_out' )
    __lowercase = nn.Parameter(model.decoder.embed_out.detach() )

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )

    __lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
    __lowercase = False

    # add projection layer
    __lowercase = nn.Parameter(projection_layer.weight )
    __lowercase = nn.Parameter(projection_layer.bias )

    __lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE )

    with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
        json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    __lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )

    __lowercase = hf_wavavec.config.to_dict()
    __lowercase = tokenizer.pad_token_id
    __lowercase = tokenizer.bos_token_id
    __lowercase = tokenizer.eos_token_id
    __lowercase = 'speech_to_text_2'
    __lowercase = 'wav2vec2'

    __lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )

    hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
    feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument(
        """--encoder_config_path""",
        default="""facebook/wav2vec2-large-lv60""",
        type=str,
        help="""Path to hf encoder wav2vec2 checkpoint config""",
    )
    parser.add_argument(
        """--decoder_config_path""",
        default="""facebook/s2t-small-mustc-en-fr-st""",
        type=str,
        help="""Path to hf decoder s2t checkpoint config""",
    )
    parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
    parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")

    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
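# Example invocation (the script filename and paths are placeholders; the flags come from the
# argparse definitions above):
#   python convert_wav2vec2_with_speech2text2_decoder.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path /path/to/output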
325
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
    lowerCAmelCase__ : Any = "swin"

    lowerCAmelCase__ : Union[str, Any] = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self : List[Any] , _UpperCAmelCase : int=2_24 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Optional[Any]=96 , _UpperCAmelCase : str=[2, 2, 6, 2] , _UpperCAmelCase : Optional[Any]=[3, 6, 12, 24] , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : int=4.0 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Any=0.0 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Union[str, Any]=1e-5 , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Optional[int] , ) -> List[str]:
        """simple docstring"""
        super().__init__(**_UpperCAmelCase )

        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = embed_dim
        __lowercase = depths
        __lowercase = len(_UpperCAmelCase )
        __lowercase = num_heads
        __lowercase = window_size
        __lowercase = mlp_ratio
        __lowercase = qkv_bias
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = drop_path_rate
        __lowercase = hidden_act
        __lowercase = use_absolute_embeddings
        __lowercase = layer_norm_eps
        __lowercase = initializer_range
        __lowercase = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __lowercase = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
        __lowercase = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(_UpperCAmelCase ) + 1 )]
        __lowercase , __lowercase = get_aligned_output_features_output_indices(
            out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : str = version.parse("1.11" )

    @property
    def a__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def a__ ( self : int ) -> float:
        """simple docstring"""
        return 1e-4
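# Usage note (hedged; identifiers are obfuscated above): in the un-obfuscated original this is
# transformers' SwinConfig. Its defaults mirror "microsoft/swin-tiny-patch4-window7-224", and the
# derived hidden_size is embed_dim * 2 ** (num_layers - 1) = 96 * 2 ** 3 = 768 for those defaults.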
325
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
    __lowercase = [0 for i in range(r + 1 )]
    # nc0 = 1
    __lowercase = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        __lowercase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
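# The loop applies Pascal's rule, C(n, r) = C(n - 1, r - 1) + C(n - 1, r), updating a single
# row of Pascal's triangle in place from right to left, so memory stays O(r). For the call
# above, C(10, 5) = 252, so the script prints 252 (in the un-obfuscated original).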
325
1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 1000 ) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )


if __name__ == "__main__":
    print(solution())
325
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Union[str, Any] = ["vqvae"]

    def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )

    def a__ ( self : Tuple ) -> int:
        """simple docstring"""
        return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00

    @torch.no_grad()
    def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """simple docstring"""
        __lowercase = steps or self.get_default_steps()
        self.scheduler.set_timesteps(_UpperCAmelCase )
        __lowercase = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            __lowercase = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=_UpperCAmelCase , device=self.device , )
        __lowercase = noise
        __lowercase = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
            __lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
            __lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
                (input_image.height, input_image.width) )
            __lowercase = (input_image / 2_55) * 2 - 1
            __lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )

            if self.vqvae is not None:
                __lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
                    generator=_UpperCAmelCase )[0]
                __lowercase = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )

            __lowercase = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __lowercase = int(mask_start_secs * pixels_per_second )
            __lowercase = int(mask_end_secs * pixels_per_second )
            __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , _UpperCAmelCase ):
                __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
            else:
                __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']

            if isinstance(self.scheduler , _UpperCAmelCase ):
                __lowercase = self.scheduler.step(
                    model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
            else:
                __lowercase = self.scheduler.step(
                    model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']

            if mask is not None:
                if mask_start > 0:
                    __lowercase = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __lowercase = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __lowercase = 1 / self.vqvae.config.scaling_factor * images
            __lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']

        __lowercase = (images / 2 + 0.5).clamp(0 , 1 )
        __lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __lowercase = (images * 2_55).round().astype('uint8' )
        __lowercase = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )

        __lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )

    @torch.no_grad()
    def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
        """simple docstring"""
        assert isinstance(self.scheduler , _UpperCAmelCase )
        self.scheduler.set_timesteps(_UpperCAmelCase )
        __lowercase = np.array(
            [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
        __lowercase = (sample / 2_55) * 2 - 1
        __lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __lowercase = self.scheduler.alphas_cumprod[t]
            __lowercase = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __lowercase = 1 - alpha_prod_t
            __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
            __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            __lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor:
        """simple docstring"""
        __lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) )
        return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
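# Usage note (an assumption, not stated in this file): upstream this class corresponds to
# diffusers' AudioDiffusionPipeline; loading a compatible audio-diffusion checkpoint via
# DiffusionPipeline.from_pretrained(...) and calling the pipeline yields mel-spectrogram
# images together with the decoded audio arrays, as the __call__ return value above shows.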
325
1
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class A__ ( unittest.TestCase ):
    lowerCAmelCase__ : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
    lowerCAmelCase__ : str = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def a__ ( self : Any ) -> int:
        """simple docstring"""
        __lowercase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
        # Using `do_sample=False` to force deterministic output
        __lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                {
                    'generated_text': (
                        'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
                        ' oscope. FiliFili@@'
                    )
                }
            ] , )

        __lowercase = text_generator(['This is a test', 'This is a second test'] )
        self.assertEqual(
            _UpperCAmelCase , [
                [
                    {
                        'generated_text': (
                            'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
                            ' oscope. FiliFili@@'
                        )
                    }
                ],
                [
                    {
                        'generated_text': (
                            'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
                            ' oscope. oscope. FiliFili@@'
                        )
                    }
                ],
            ] , )

        __lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase , num_return_sequences=2 , return_tensors=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                {'generated_token_ids': ANY(_UpperCAmelCase )},
                {'generated_token_ids': ANY(_UpperCAmelCase )},
            ] , )

        __lowercase = text_generator.model.config.eos_token_id
        __lowercase = '<pad>'
        __lowercase = text_generator(
            ['This is a test', 'This is a second test'] , do_sample=_UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_UpperCAmelCase , )
        self.assertEqual(
            _UpperCAmelCase , [
                [
                    {'generated_token_ids': ANY(_UpperCAmelCase )},
                    {'generated_token_ids': ANY(_UpperCAmelCase )},
                ],
                [
                    {'generated_token_ids': ANY(_UpperCAmelCase )},
                    {'generated_token_ids': ANY(_UpperCAmelCase )},
                ],
            ] , )

    @require_tf
    def a__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        __lowercase = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )

        # Using `do_sample=False` to force deterministic output
        __lowercase = text_generator('This is a test' , do_sample=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                {
                    'generated_text': (
                        'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
                        ' please,'
                    )
                }
            ] , )

        __lowercase = text_generator(['This is a test', 'This is a second test'] , do_sample=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                [
                    {
                        'generated_text': (
                            'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
                            ' please,'
                        )
                    }
                ],
                [
                    {
                        'generated_text': (
                            'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
                            ' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
                        )
                    }
                ],
            ] , )

    def a__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : int ) -> Any:
        """simple docstring"""
        __lowercase = TextGenerationPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        return text_generator, ["This is a test", "Another test"]

    def a__ ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = 'Hello I believe in'
        __lowercase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        __lowercase = text_generator(_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )

        __lowercase = text_generator(_UpperCAmelCase , stop_sequence=' fe' )
        self.assertEqual(_UpperCAmelCase , [{'generated_text': 'Hello I believe in fe'}] )

    def a__ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> List[str]:
        """simple docstring"""
        __lowercase = text_generator.model
        __lowercase = text_generator.tokenizer

        __lowercase = text_generator('This is a test' )
        self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )

        __lowercase = text_generator('This is a test' , return_full_text=_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )

        __lowercase = pipeline(task='text-generation' , model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , return_full_text=_UpperCAmelCase )
        __lowercase = text_generator('This is a test' )
        self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
        self.assertNotIn('This is a test' , outputs[0]['generated_text'] )

        __lowercase = text_generator('This is a test' , return_full_text=_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )

        __lowercase = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=_UpperCAmelCase )
        self.assertEqual(
            _UpperCAmelCase , [
                [{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
                [{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
            ] , )

        if text_generator.tokenizer.pad_token is not None:
            __lowercase = text_generator(
                ['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=_UpperCAmelCase )
            self.assertEqual(
                _UpperCAmelCase , [
                    [{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
                    [{'generated_text': ANY(_UpperCAmelCase )}, {'generated_text': ANY(_UpperCAmelCase )}],
                ] , )

        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = text_generator('test' , return_full_text=_UpperCAmelCase , return_text=_UpperCAmelCase )
        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = text_generator('test' , return_full_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = text_generator('test' , return_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )

        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            __lowercase = text_generator('' )
            self.assertEqual(_UpperCAmelCase , [{'generated_text': ANY(_UpperCAmelCase )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                __lowercase = text_generator('' )

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        __lowercase = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
        if (
            tokenizer.model_max_length < 1_00_00
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('This is a test' * 5_00 , max_new_tokens=20 )

            __lowercase = text_generator('This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(_UpperCAmelCase ):
                text_generator(
                    'This is a test' * 5_00 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def a__ ( self : str ) -> Optional[int]:
        """simple docstring"""
        import torch

        # Classic `model_kwargs`
        __lowercase = pipeline(
            model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __lowercase = pipe('This is a test' )
        self.assertEqual(
            _UpperCAmelCase , [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] , )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        __lowercase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __lowercase = pipe('This is a test' )
        self.assertEqual(
            _UpperCAmelCase , [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] , )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        __lowercase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        __lowercase = pipe('This is a test' )
        self.assertEqual(
            _UpperCAmelCase , [
                {
                    'generated_text': (
                        'This is a test test test test test test test test test test test test test test test test'
                        ' test'
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def a__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        import torch

        __lowercase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
        pipe('This is a test' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def a__ ( self : List[Any] ) -> List[str]:
        """simple docstring"""
        import torch

        __lowercase = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
        pipe('This is a test' , do_sample=_UpperCAmelCase , top_p=0.5 )

    def a__ ( self : Dict ) -> str:
        """simple docstring"""
        __lowercase = 'Hello world'
        __lowercase = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
        if text_generator.model.framework == "tf":
            __lowercase = logging.get_logger('transformers.generation.tf_utils' )
        else:
            __lowercase = logging.get_logger('transformers.generation.utils' )
        __lowercase = 'Both `max_new_tokens`'  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(_UpperCAmelCase ) as cl:
            __lowercase = text_generator(_UpperCAmelCase , max_length=10 , max_new_tokens=1 )
        self.assertIn(_UpperCAmelCase , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(_UpperCAmelCase ) as cl:
            __lowercase = text_generator(_UpperCAmelCase , max_new_tokens=1 )
        self.assertNotIn(_UpperCAmelCase , cl.out )

        with CaptureLogger(_UpperCAmelCase ) as cl:
            __lowercase = text_generator(_UpperCAmelCase , max_length=10 )
        self.assertNotIn(_UpperCAmelCase , cl.out )
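# How these run (hedged; the exact path depends on the repo layout): they are standard
# unittest cases, typically collected with pytest, e.g.
#   python -m pytest <path/to/this/test module>
# The @require_* decorators skip tests when the matching framework or hardware is missing.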
325
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
SCREAMING_SNAKE_CASE__ = 10


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        if array[i] == target:
            return i
    return -1


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    __lowercase = 0
    __lowercase = len(SCREAMING_SNAKE_CASE )
    while left <= right:
        if right - left < precision:
            return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        __lowercase = (left + right) // 3 + 1
        __lowercase = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            __lowercase = one_third - 1
        elif array[two_third] < target:
            __lowercase = two_third + 1

        else:
            __lowercase = one_third + 1
            __lowercase = two_third - 1
    else:
        return -1


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __lowercase = (left + right) // 3 + 1
        __lowercase = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(SCREAMING_SNAKE_CASE , one_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by comma:\n""").strip()
    SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be found in the list:\n""").strip())
    SCREAMING_SNAKE_CASE__ = ite_ternary_search(collection, target)
    SCREAMING_SNAKE_CASE__ = rec_ternary_search(0, len(collection) - 1, collection, target)

    if resulta != -1:
        print(F'''Iterative search: {target} found at positions: {resulta}''')
        print(F'''Recursive search: {target} found at positions: {resulta}''')
    else:
        print("""Not found""")
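# Worked example of the underlying ternary-search logic (identifiers are obfuscated above, so
# this describes the un-obfuscated behavior): on a sorted list the search discards a third of
# the range per step, O(log3 n), falling back to linear scan below the precision threshold.
#   ite_ternary_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13) -> 4
#   rec_ternary_search(0, 8, [0, 1, 2, 8, 13, 17, 19, 32, 42], 13) -> 4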
325
1
from maths.prime_check import is_prime


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> int:
    if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        __lowercase = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(SCREAMING_SNAKE_CASE )
    if is_prime(SCREAMING_SNAKE_CASE ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
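# Behavior of the underlying twin-prime check (the function name is obfuscated above):
# given a prime p such that p + 2 is also prime, it returns p + 2, else -1. For example,
# an input of 3 yields 5 (3 and 5 are both prime), while an input of 4 yields -1.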
325
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class A__ ( nn.Module ):
        def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
            """simple docstring"""
            super().__init__()
            __lowercase = module
            __lowercase = nn.Sequential(
                nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
            __lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )

        def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
            """simple docstring"""
            return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    lowerCAmelCase__ : int = "bigscience/bloom-1b7"

    # Constant values
    lowerCAmelCase__ : Any = 2.109659552692574
    lowerCAmelCase__ : str = "Hello my name is"
    lowerCAmelCase__ : Any = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    lowerCAmelCase__ : List[Any] = 10

    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase = AutoTokenizer.from_pretrained(self.model_name )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        super().setUp()

        # Models and tokenizer
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )

    def a__ ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : str ) -> int:
        """simple docstring"""
        __lowercase = self.model_abit.config
        self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
        __lowercase = config.to_dict()
        __lowercase = config.to_diff_dict()
        __lowercase = config.to_json_string()

    def a__ ( self : Dict ) -> Tuple:
        """simple docstring"""
        from bitsandbytes.nn import Paramsabit

        __lowercase = self.model_fpaa.get_memory_footprint()
        __lowercase = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        __lowercase = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def a__ ( self : Tuple ) -> str:
        """simple docstring"""
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(_UpperCAmelCase , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def a__ ( self : List[str] ) -> str:
        """simple docstring"""
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        __lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )

    def a__ ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        __lowercase = BitsAndBytesConfig()
        __lowercase = True
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        __lowercase = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )

    def a__ ( self : str ) -> List[str]:
        """simple docstring"""
        with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(_UpperCAmelCase )

    def a__ ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = BitsAndBytesConfig()
        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = AutoModelForCausalLM.from_pretrained(
                self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )

    def a__ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with `str`
            self.model_abit.to('cpu' )

        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )

        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )

        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )

        __lowercase = self.model_fpaa.to(torch.floataa )
        __lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )

        # Check this does not throw an error
        __lowercase = self.model_fpaa.to('cpu' )

        # Check this does not throw an error
        __lowercase = self.model_fpaa.half()

        # Check this does not throw an error
        __lowercase = self.model_fpaa.float()

    def a__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    @classmethod
    def a__ ( cls : int ) -> Tuple:
        """simple docstring"""
        __lowercase = 't5-small'
        __lowercase = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        __lowercase = AutoTokenizer.from_pretrained(cls.model_name )
        __lowercase = 'Translate in German: Hello, my dog is cute'

    def a__ ( self : List[Any] ) -> Dict:
        """simple docstring"""
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : int ) -> int:
        """simple docstring"""
        from transformers import TaForConditionalGeneration

        __lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
        __lowercase = None

        # test with `t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )

        # test with `flan-t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )
        __lowercase = modules

    def a__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )

        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )

        # test with `flan-t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        super().setUp()
        # model_name
        __lowercase = 'bigscience/bloom-560m'
        __lowercase = 't5-small'

        # Different types of model
        __lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # Sequence classification model
        __lowercase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # CausalLM model
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # Seq2seq model
        __lowercase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )

    def a__ ( self : int ) -> List[str]:
        """simple docstring"""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : Tuple ) -> str:
        """simple docstring"""
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : str ) -> str:
        """simple docstring"""
        super().setUp()

    def a__ ( self : Dict ) -> Any:
        """simple docstring"""
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : Tuple ) -> int:
        """simple docstring"""
        __lowercase = pipeline(
            'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )

        # Real second forward pass
        __lowercase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )


@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        super().setUp()

    def a__ ( self : List[Any] ) -> int:
        """simple docstring"""
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )

        # Check that inference pass works on the model
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )

        # Second real batch
        __lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = 'facebook/opt-350m'
        super().setUp()

    def a__ ( self : Dict ) -> List[str]:
        """simple docstring"""
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return

        # Step 1: freeze all parameters
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )

        for param in model.parameters():
            __lowercase = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __lowercase = param.data.to(torch.floataa )

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
                __lowercase = LoRALayer(module.q_proj , rank=16 )
                __lowercase = LoRALayer(module.k_proj , rank=16 )
                __lowercase = LoRALayer(module.v_proj , rank=16 )

        # Step 3: dummy batch
        __lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __lowercase = model.forward(**_UpperCAmelCase )
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(_UpperCAmelCase , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Any = "gpt2-xl"
    lowerCAmelCase__ : str = 3.3191854854152187
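# Runtime requirements (read off the decorators above, not an addition to the test logic):
# every test class is gated by @require_bitsandbytes, @require_accelerate, @require_torch and
# @require_torch_gpu (plus @require_torch_multi_gpu for the parallel test), so executing this
# module needs a CUDA GPU with the bitsandbytes and accelerate packages installed.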
325
1
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    SCREAMING_SNAKE_CASE__ = """platform"""

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Optional[int]=None , ) -> List[str]:
    if attention_mask is None:
        __lowercase = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        __lowercase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        __lowercase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        __lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        __lowercase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class A__ :
    def __init__( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Union[str, Any]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Any=False , _UpperCAmelCase : int=99 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : Optional[int]=0 , _UpperCAmelCase : str=0.02 , ) -> Tuple:
        """simple docstring"""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = seq_length
        __lowercase = is_training
        __lowercase = use_labels
        __lowercase = vocab_size
        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = max_position_embeddings
        __lowercase = eos_token_id
        __lowercase = pad_token_id
        __lowercase = bos_token_id
        __lowercase = initializer_range

    def a__ ( self : str ) -> Dict:
        """simple docstring"""
        __lowercase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        __lowercase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )

        __lowercase = shift_tokens_right(_UpperCAmelCase , 1 , 2 )

        __lowercase = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
        __lowercase = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        return config, inputs_dict

    def a__ ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase , __lowercase = self.prepare_config_and_inputs()
        return config, inputs_dict

    def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ) -> str:
        """simple docstring"""
        __lowercase = 20
        __lowercase = model_class_name(_UpperCAmelCase )

        __lowercase = model.encode(inputs_dict['input_ids'] )

        __lowercase , __lowercase = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        __lowercase = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
        __lowercase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )

        __lowercase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        __lowercase = model.decode(
            decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )

        __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        __lowercase = model.decode(
            decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )

        __lowercase = model.decode(_UpperCAmelCase , _UpperCAmelCase )

        __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : List[str] ) -> int:
        """simple docstring"""
        __lowercase = 20
        __lowercase = model_class_name(_UpperCAmelCase )

        __lowercase = model.encode(inputs_dict['input_ids'] )

        __lowercase , __lowercase = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )

        __lowercase = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )

        __lowercase = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
        __lowercase = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )

        __lowercase = model.decode(
            decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
        __lowercase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        __lowercase = model.decode(
            decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )

        __lowercase = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )

        __lowercase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )


@require_flax
class A__ ( unittest.TestCase ):
    lowerCAmelCase__ : Any = 99

    def a__ ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        __lowercase = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )

        __lowercase = input_ids.shape[0]
        __lowercase = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def a__ ( self : int ) -> Any:
        """simple docstring"""
        __lowercase , __lowercase , __lowercase = self._get_config_and_data()
        __lowercase = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
        __lowercase = lm_model(input_ids=_UpperCAmelCase )
        __lowercase = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )

    def a__ ( self : Any ) -> Dict:
        """simple docstring"""
        __lowercase = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        __lowercase = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
        __lowercase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        __lowercase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        __lowercase = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
        __lowercase = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )

    def a__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        __lowercase = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
        __lowercase = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
        __lowercase = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )


@require_flax
class A__ ( lowerCAmelCase__ , unittest.TestCase , lowerCAmelCase__ ):
    lowerCAmelCase__ : Any = True
    lowerCAmelCase__ : Tuple = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ : str = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def a__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = FlaxBlenderbotModelTester(self )

    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def a__ ( self : List[Any] ) -> int:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def a__ ( self : Dict ) -> List[Any]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
                __lowercase = model_class(_UpperCAmelCase )

                @jax.jit
                def encode_jitted(_UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : str ):
                    return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )

                with self.subTest('JIT Enabled' ):
                    __lowercase = encode_jitted(**_UpperCAmelCase ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __lowercase = encode_jitted(**_UpperCAmelCase ).to_tuple()

                self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
                for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def a__ ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __lowercase = model_class(_UpperCAmelCase )
                __lowercase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )

                __lowercase = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(_UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ):
                    return model.decode(
                        decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )

                with self.subTest('JIT Enabled' ):
                    __lowercase = decode_jitted(**_UpperCAmelCase ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __lowercase = decode_jitted(**_UpperCAmelCase ).to_tuple()

                self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
                for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def a__ ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            __lowercase = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            __lowercase = np.ones((1, 1) ) * model.config.eos_token_id
            __lowercase = model(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

    @unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
    @slow
    def a__ ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        __lowercase = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
        __lowercase = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}

        __lowercase = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCAmelCase )
        __lowercase = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )

        __lowercase = ['Sam']
        __lowercase = tokenizer(_UpperCAmelCase , return_tensors='jax' )

        __lowercase = model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
        __lowercase = 'Sam is a great name. It means "sun" in Gaelic.'

        __lowercase = tokenizer.batch_decode(_UpperCAmelCase , **_UpperCAmelCase )
        assert generated_txt[0].strip() == tgt_text
325
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = 'gelu' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = 
TFConvBertModel(config=_UpperCAmelCase ) __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase__ : List[str] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : List[str] = False def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : int ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(_UpperCAmelCase , 'use_cache' ): __lowercase = True __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = model_class(_UpperCAmelCase ) __lowercase = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' ) __lowercase = tf.keras.models.load_model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = outputs['encoder_hidden_states'] __lowercase = outputs['encoder_attentions'] else: __lowercase = outputs['hidden_states'] __lowercase = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase : int ): __lowercase = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always 
last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
325
1
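# A minimal sketch of the right-shift convention exercised by the shift_tokens_right
# test in the Blenderbot sample above: each row moves one position to the right and the
# decoder start token is prepended, so exactly one trailing pad token disappears. The
# numpy-only helper below is an illustrative assumption, not the library's exact code.
import numpy as np

def shift_tokens_right_sketch(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # drop the last token of every row
    shifted[:, 0] = decoder_start_token_id  # prepend the decoder start token
    # any -100 label sentinel uncovered by the shift is replaced with padding
    return np.where(shifted == -100, pad_token_id, shifted)

tokens = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
shifted = shift_tokens_right_sketch(tokens, pad_token_id=1, decoder_start_token_id=2)
assert (shifted[:, 0] == 2).all()
assert (shifted == 1).sum() == (tokens == 1).sum() - 1  # one fewer pad token after shifting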
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class A__ ( unittest.TestCase ):
    @slow
    def a__ ( self : Tuple ) -> Dict:
        """simple docstring"""
        __lowercase = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        __lowercase = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
        model.to(_UpperCAmelCase )

        from datasets import load_dataset

        __lowercase = load_dataset('nielsr/rvlcdip-demo' )

        __lowercase = dataset['train'][0]['image'].convert('RGB' )

        __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )

        # forward pass
        with torch.no_grad():
            __lowercase = model(**_UpperCAmelCase )
        __lowercase = outputs.logits

        __lowercase = torch.Size((1, 16) )
        self.assertEqual(logits.shape , _UpperCAmelCase )

        __lowercase = torch.tensor(
            [-0.4_158, -0.4_092, -0.4_347] ,
            device=_UpperCAmelCase ,
            dtype=torch.float ,
        )
        self.assertTrue(torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
325
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")


class A__ :
    def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = scheduler
        __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers]
        __lowercase = split_batches
        __lowercase = step_with_optimizer
        __lowercase = GradientState()

    def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]:
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            __lowercase = AcceleratorState().num_processes
            for _ in range(_UpperCAmelCase ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , 'total_steps' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
                else:
                    self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )

    def a__ ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        return self.scheduler.get_last_lr()

    def a__ ( self : List[str] ) -> Tuple:
        """simple docstring"""
        return self.scheduler.state_dict()

    def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        self.scheduler.load_state_dict(_UpperCAmelCase )

    def a__ ( self : Dict ) -> int:
        """simple docstring"""
        return self.scheduler.get_lr()

    def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any:
        """simple docstring"""
        return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
325
1
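# An illustrative sketch of why the scheduler wrapper above steps `num_processes` times
# per training step when `split_batches` is off: each process consumes its own batch, so
# the single-process schedule must advance once per process. The toy world size and the
# LambdaLR decay below are assumptions chosen only for demonstration.
import torch
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: 1.0 / (1 + step))
num_processes = 2  # in real use this would come from AcceleratorState().num_processes

for _ in range(3):  # three local training steps
    optimizer.step()
    for _ in range(num_processes):  # mirror the wrapper's behavior without split_batches
        scheduler.step()

print(scheduler.get_last_lr())  # lr after 3 * num_processes = 6 scheduler steps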
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


SCREAMING_SNAKE_CASE__ = {
    """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
    """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ["""MaskFormerFeatureExtractor"""]
    SCREAMING_SNAKE_CASE__ = ["""MaskFormerImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MaskFormerForInstanceSegmentation""",
        """MaskFormerModel""",
        """MaskFormerPreTrainedModel""",
    ]
    SCREAMING_SNAKE_CASE__ = [
        """MaskFormerSwinBackbone""",
        """MaskFormerSwinModel""",
        """MaskFormerSwinPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
325
import collections import importlib.util import os import re from pathlib import Path SCREAMING_SNAKE_CASE__ = """src/transformers""" # Matches is_xxx_available() SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""") # Catches a line with else: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""") def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None: return None __lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.readlines() __lowercase = 0 while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure __lowercase = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowercase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ): __lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0] __lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: __lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowercase = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __lowercase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowercase = [] while ( line_index < len(SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowercase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int: def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowercase = [] for key in import_dict_objects.keys(): __lowercase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __lowercase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowercase = 'base imports' if key == 'none' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ) -> Tuple: __lowercase = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) __lowercase = parse_init(SCREAMING_SNAKE_CASE ) if objects is not None: __lowercase = analyze_results(*SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('\n'.join(SCREAMING_SNAKE_CASE ) ) if len(SCREAMING_SNAKE_CASE ) > 0: raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0: continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace(os.path.sep , '.' ) submodules.append(SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' 
) if len(submodule.split('.' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE ) return submodules SCREAMING_SNAKE_CASE__ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def __SCREAMING_SNAKE_CASE ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. __lowercase = importlib.util.spec_from_file_location( 'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) __lowercase = spec.loader.load_module() __lowercase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F"""{list_of_modules}\n""" 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
325
1
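# A small demonstration of the backend-detection helper used by the init checker above:
# the guard regex only fires on `if not is_xxx_available()` lines, and multiple backends
# on one line are joined with "_and_". The sample input lines are assumptions.
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")

def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    return "_and_".join(sorted(_re_backend.findall(line)))

print(find_backend("    if not is_torch_available():"))  # -> torch
print(find_backend("if not is_torch_available() and not is_vision_available():"))  # -> torch_and_vision
print(find_backend("import os"))  # -> None (not a backend guard)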
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets SCREAMING_SNAKE_CASE__ = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ SCREAMING_SNAKE_CASE__ = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ SCREAMING_SNAKE_CASE__ = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : int=True , _UpperCAmelCase : Optional[int]=False ) -> List[str]: """simple docstring""" if rouge_types is None: __lowercase = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] __lowercase = rouge_scorer.RougeScorer(rouge_types=_UpperCAmelCase , use_stemmer=_UpperCAmelCase ) if use_aggregator: __lowercase = scoring.BootstrapAggregator() else: __lowercase = [] for ref, pred in zip(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = scorer.score(_UpperCAmelCase , _UpperCAmelCase ) if use_aggregator: aggregator.add_scores(_UpperCAmelCase ) else: scores.append(_UpperCAmelCase ) if use_aggregator: __lowercase = aggregator.aggregate() else: __lowercase = {} for key in scores[0]: __lowercase = [score[key] for score in scores] return result
325
import logging
import os

from .state import PartialState


class A__ ( logging.LoggerAdapter ):
    @staticmethod
    def a__ ( _UpperCAmelCase : str ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Optional[int]:
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        __lowercase = kwargs.pop('main_process_only' , _UpperCAmelCase )
        __lowercase = kwargs.pop('in_order' , _UpperCAmelCase )

        if self.isEnabledFor(_UpperCAmelCase ):
            if self._should_log(_UpperCAmelCase ):
                __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )

            elif in_order:
                __lowercase = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                        self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
                    state.wait_for_everyone()


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ) -> Optional[Any]:
    if log_level is None:
        __lowercase = os.environ.get('ACCELERATE_LOG_LEVEL' , SCREAMING_SNAKE_CASE )
    __lowercase = logging.getLogger(SCREAMING_SNAKE_CASE )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
325
1
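# A minimal sketch of the rouge_score API that the ROUGE metric wrapper above delegates
# to: per-pair scores are collected and, with `use_aggregator` on, combined by bootstrap
# resampling. The two-line toy corpus is an assumption for illustration.
from rouge_score import rouge_scorer, scoring

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
aggregator = scoring.BootstrapAggregator()

for ref, pred in zip(["hello there", "general kenobi"], ["hello there", "general kenobi"]):
    aggregator.add_scores(scorer.score(ref, pred))

result = aggregator.aggregate()
print(result["rouge1"].mid.fmeasure)  # 1.0 for exact matches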
import logging
import os

from .state import PartialState


class A__ ( logging.LoggerAdapter ):
    @staticmethod
    def a__ ( _UpperCAmelCase : str ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Optional[int]:
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        __lowercase = kwargs.pop('main_process_only' , _UpperCAmelCase )
        __lowercase = kwargs.pop('in_order' , _UpperCAmelCase )

        if self.isEnabledFor(_UpperCAmelCase ):
            if self._should_log(_UpperCAmelCase ):
                __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )

            elif in_order:
                __lowercase = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                        self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
                    state.wait_for_everyone()


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ) -> Optional[Any]:
    if log_level is None:
        __lowercase = os.environ.get('ACCELERATE_LOG_LEVEL' , SCREAMING_SNAKE_CASE )
    __lowercase = logging.getLogger(SCREAMING_SNAKE_CASE )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
325
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: __lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowercase = [3, 3, 3, 3] __lowercase = [5, 5, 5, 5] elif "fl4" in model_name: __lowercase = [4, 4, 4, 4] __lowercase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowercase = [3, 3, 3, 3] if "lrf" in model_name: __lowercase = [3, 3, 3, 3] else: __lowercase = [2, 2, 2, 2] if "tiny" in model_name: __lowercase = 96 elif "small" in model_name: __lowercase = 96 elif "base" in model_name: __lowercase = 128 elif "large" in model_name: __lowercase = 192 elif "xlarge" in model_name: __lowercase = 256 elif "huge" in model_name: __lowercase = 352 # set label information __lowercase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowercase = 'imagenet-22k-id2label.json' else: __lowercase = 'imagenet-1k-id2label.json' __lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = {v: k for k, v in idalabel.items()} __lowercase = FocalNetConfig( embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , ) return config def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict: if "patch_embed.proj" in name: __lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowercase = 'encoder.' + name if "encoder.layers" in name: __lowercase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowercase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowercase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowercase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowercase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowercase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowercase = 'layernorm.weight' if name == "norm.bias": __lowercase = 'layernorm.bias' if "head" in name: __lowercase = name.replace('head' , 'classifier' ) else: __lowercase = 'focalnet.' 
+ name return name def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]: # fmt: off __lowercase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowercase = model_name_to_url[model_name] print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE ) __lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowercase = state_dict.pop(SCREAMING_SNAKE_CASE ) __lowercase = val __lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE ) __lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE ) model.eval() # load state dict model.load_state_dict(SCREAMING_SNAKE_CASE ) # verify conversion __lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowercase = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , ) __lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) __lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ) __lowercase = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 ) __lowercase = model(**SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] ) elif model_name == "focalnet-tiny-lrf": __lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] ) elif model_name == "focalnet-small": __lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] ) elif model_name == "focalnet-small-lrf": __lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] ) elif model_name == "focalnet-base": 
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] ) elif model_name == "focalnet-base-lrf": __lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""focalnet-tiny""", type=str, help="""Name of the FocalNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub.""", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
325
1
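# A short usage sketch for the multi-process logging adapter above: the accelerate state
# must be initialized first (e.g. by constructing an Accelerator), logging happens on the
# main process by default, and the popped kwargs opt into rank-ordered logging instead.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState the adapter checks for
logger = get_logger(__name__, log_level="INFO")

logger.info("emitted once, from the main process only")
logger.info("emitted by every process, rank by rank", main_process_only=False, in_order=True)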
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)


@dataclass
class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray]
    lowerCAmelCase__ : Optional[List[bool]]
    lowerCAmelCase__ : Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
325
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "mask2former" lowerCAmelCase__ : List[Any] = ["swin"] lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"} def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowercase = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = backbone_config.pop('model_type' ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a__ ( self : str ) -> Dict[str, any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
325
1
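# A hedged sketch of constructing the Mask2Former config above: with no backbone config a
# default Swin backbone is created, while a dict carrying a `model_type` key goes through
# the CONFIG_MAPPING/from_dict path shown in its __init__. The depth/head values below
# are the defaults quoted there, repeated only for illustration.
from transformers import Mask2FormerConfig

config = Mask2FormerConfig()  # logs that a default Swin backbone is being used
print(config.backbone_config.model_type)  # -> swin

custom = Mask2FormerConfig(
    backbone_config={"model_type": "swin", "depths": [2, 2, 18, 2], "num_heads": [3, 6, 12, 24]},
    num_queries=100,
)
print(custom.num_queries)  # -> 100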
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=0.999 , SCREAMING_SNAKE_CASE : Optional[Any]="cosine" , ) -> Union[str, Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(SCREAMING_SNAKE_CASE : Optional[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(SCREAMING_SNAKE_CASE : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __lowercase = [] for i in range(SCREAMING_SNAKE_CASE ): __lowercase = i / num_diffusion_timesteps __lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE ) / alpha_bar_fn(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) return torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = [e.name for e in KarrasDiffusionSchedulers] lowerCAmelCase__ : int = 2 @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.00_085 , _UpperCAmelCase : float = 0.012 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : Optional[bool] = False , _UpperCAmelCase : Optional[bool] = False , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : str = "linspace" , _UpperCAmelCase : int = 0 , ) -> str: """simple docstring""" if trained_betas is not None: __lowercase = torch.tensor(_UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __lowercase = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowercase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowercase = betas_for_alpha_bar(_UpperCAmelCase , alpha_transform_type='cosine' ) elif beta_schedule == "exp": __lowercase = betas_for_alpha_bar(_UpperCAmelCase , alpha_transform_type='exp' ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __lowercase = 1.0 - self.betas __lowercase = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = use_karras_sigmas def a__ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any=None ) -> Any: """simple docstring""" if schedule_timesteps is None: __lowercase = self.timesteps __lowercase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __lowercase = 1 if len(_UpperCAmelCase ) > 1 else 0 else: __lowercase = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep __lowercase = self._index_counter[timestep_int] return indices[pos].item() @property def a__ ( self : Optional[int] ) -> Any: """simple docstring""" if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def a__ ( self : Optional[int] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: """simple docstring""" __lowercase = self.index_for_timestep(_UpperCAmelCase ) __lowercase = self.sigmas[step_index] __lowercase = sample / ((sigma**2 + 1) ** 0.5) return sample def a__ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None , _UpperCAmelCase : Optional[int] = None , ) -> Optional[int]: """simple docstring""" __lowercase = num_inference_steps __lowercase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __lowercase = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __lowercase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __lowercase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase = (np.arange(_UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(_UpperCAmelCase ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __lowercase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __lowercase = np.log(_UpperCAmelCase ) __lowercase = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase ) ) , _UpperCAmelCase ) if self.config.use_karras_sigmas: __lowercase = self._convert_to_karras(in_sigmas=_UpperCAmelCase , num_inference_steps=self.num_inference_steps ) __lowercase = np.array([self._sigma_to_t(_UpperCAmelCase , _UpperCAmelCase ) for sigma in sigmas] ) __lowercase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __lowercase = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) __lowercase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) __lowercase = torch.from_numpy(_UpperCAmelCase ) __lowercase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(_UpperCAmelCase ).startswith('mps' ): # mps does not support float64 __lowercase = timesteps.to(_UpperCAmelCase , dtype=torch.floataa ) else: __lowercase = timesteps.to(device=_UpperCAmelCase ) # empty dt and derivative __lowercase = None __lowercase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __lowercase = defaultdict(_UpperCAmelCase ) def a__ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> List[str]: """simple docstring""" __lowercase = np.log(_UpperCAmelCase ) # get distribution __lowercase = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range __lowercase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) __lowercase = low_idx + 1 __lowercase = log_sigmas[low_idx] __lowercase = log_sigmas[high_idx] # interpolate sigmas __lowercase = (low - log_sigma) / (low - high) __lowercase = np.clip(_UpperCAmelCase , 0 , 1 ) # transform interpolation to time range __lowercase = (1 - w) * low_idx + w * high_idx __lowercase = t.reshape(sigma.shape ) return t def a__ ( self : Any , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int ) -> torch.FloatTensor: """simple docstring""" __lowercase = in_sigmas[-1].item() __lowercase = in_sigmas[0].item() __lowercase = 7.0 # 7.0 is the value used in the paper __lowercase = np.linspace(0 , 1 , _UpperCAmelCase ) __lowercase = sigma_min ** (1 / rho) __lowercase = sigma_max ** (1 / rho) __lowercase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.dt is None def a__ ( self : List[str] , _UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , _UpperCAmelCase : Union[float, torch.FloatTensor] , _UpperCAmelCase : Union[torch.FloatTensor, np.ndarray] , _UpperCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]: """simple docstring""" __lowercase = self.index_for_timestep(_UpperCAmelCase ) # advance index counter by 1 __lowercase = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __lowercase = self.sigmas[step_index] __lowercase = self.sigmas[step_index + 1] else: # 2nd order / Heun's method __lowercase = self.sigmas[step_index - 1] __lowercase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __lowercase = 0 __lowercase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __lowercase = sigma_hat if self.state_in_first_order else sigma_next __lowercase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __lowercase = sigma_hat if self.state_in_first_order else sigma_next __lowercase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": __lowercase = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.config.clip_sample: __lowercase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __lowercase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __lowercase = sigma_next - sigma_hat # store for 2nd order step __lowercase = derivative __lowercase = dt __lowercase = sample else: # 2. 2nd order / Heun's method __lowercase = (sample - pred_original_sample) / sigma_next __lowercase = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample __lowercase = self.dt __lowercase = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" __lowercase = None __lowercase = None __lowercase = None __lowercase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def a__ ( self : List[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor: """simple docstring""" __lowercase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase ): # mps does not support float64 __lowercase = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __lowercase = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __lowercase = self.timesteps.to(original_samples.device ) __lowercase = timesteps.to(original_samples.device ) __lowercase = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase ) for t in timesteps] __lowercase = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __lowercase = sigma.unsqueeze(-1 ) __lowercase = original_samples + noise * sigma return noisy_samples def __len__( self : int ) -> List[Any]: """simple docstring""" return self.config.num_train_timesteps
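For reference, a runnable de-obfuscated sketch of the cosine beta schedule that the betas_for_alpha_bar helper above computes; cosine_betas is my name, and float32 is an assumption (the dump's torch.floataa appears to be a mangled torch.float32):

import math

import torch


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> torch.Tensor:
    # alpha_bar(t) follows the squared-cosine noise schedule; each beta is
    # 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)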
325
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: __lowercase = TOKENIZER_CLASSES else: __lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )} logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: __lowercase = TOKENIZER_CLASSES[tokenizer_name] __lowercase = True if checkpoint_name is None: __lowercase = list(tokenizer_class.max_model_input_sizes.keys() ) else: __lowercase = [checkpoint_name] logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer __lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE ) # Save fast tokenizer logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: __lowercase , __lowercase = checkpoint.split('/' ) __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif add_prefix: __lowercase = checkpoint __lowercase = dump_path else: __lowercase = None __lowercase = dump_path logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0] if next_char == "/": __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = None logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) __lowercase = tokenizer.save_pretrained( SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE ) logger.info(F"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(SCREAMING_SNAKE_CASE ) logger.info(F"""=> removing {file_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files.""" ) parser.add_argument( """--tokenizer_name""", default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' """download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--checkpoint_name""", default=None, type=str, help="""Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.""", ) parser.add_argument( """--force_download""", action="""store_true""", help="""Re-download checkpoints.""", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
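At its core the conversion script above round-trips each checkpoint through from_pretrained and save_pretrained; a hedged sketch of that flow (the checkpoint name and output directory are illustrative):

from transformers import BertTokenizerFast

# Load a checkpoint with the fast tokenizer class, then re-save it so that
# only the fast (tokenizer.json-style) files are produced.
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
tokenizer.save_pretrained("./fast-tokenizer-dump", legacy_format=False)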
325
1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict ) -> bool: __lowercase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowercase = set() return any( node not in visited and depth_first_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for node in graph ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : set , SCREAMING_SNAKE_CASE : set ) -> bool: visited.add(SCREAMING_SNAKE_CASE ) rec_stk.add(SCREAMING_SNAKE_CASE ) for node in graph[vertex]: if node not in visited: if depth_first_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(SCREAMING_SNAKE_CASE ) return False if __name__ == "__main__": from doctest import testmod testmod()
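A self-contained, readable rendering of the same back-edge DFS (has_cycle and dfs are illustrative names; the identifiers in the dump are obfuscated):

def has_cycle(graph: dict) -> bool:
    # A directed graph contains a cycle iff a DFS ever reaches a vertex
    # that is still on the current recursion stack.
    visited: set = set()
    rec_stack: set = set()

    def dfs(vertex) -> bool:
        visited.add(vertex)
        rec_stack.add(vertex)
        for neighbour in graph[vertex]:
            if neighbour not in visited and dfs(neighbour):
                return True
            if neighbour in rec_stack:
                return True
        rec_stack.remove(vertex)
        return False

    return any(vertex not in visited and dfs(vertex) for vertex in graph)


print(has_cycle({0: [1], 1: [2], 2: [0]}))    # True: 0 -> 1 -> 2 -> 0
print(has_cycle({0: [1, 2], 1: [2], 2: []}))  # False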
325
from math import isqrt, loga def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]: __lowercase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __lowercase = False return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800800 , SCREAMING_SNAKE_CASE : int = 800800 ) -> int: __lowercase = degree * loga(SCREAMING_SNAKE_CASE ) __lowercase = int(SCREAMING_SNAKE_CASE ) __lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = len(SCREAMING_SNAKE_CASE ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F'''{solution() = }''')
325
1
import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace() SCREAMING_SNAKE_CASE__ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). SCREAMING_SNAKE_CASE__ = [0, 25, 50] SCREAMING_SNAKE_CASE__ = [25, 50, 75] SCREAMING_SNAKE_CASE__ = fuzz.membership.trimf(X, abca) SCREAMING_SNAKE_CASE__ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. SCREAMING_SNAKE_CASE__ = np.ones(75) SCREAMING_SNAKE_CASE__ = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1 - µA(x)) SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x), (1 - µB(x))) SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] SCREAMING_SNAKE_CASE__ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) SCREAMING_SNAKE_CASE__ = young * middle_aged # 7. Bounded Sum = min[1, (µA(x) + µB(x))] SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = max[0, (µA(x) - µB(x))] SCREAMING_SNAKE_CASE__ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title("""Young""") plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title("""Middle aged""") plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title("""union""") plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title("""intersection""") plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title("""complement_a""") plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title("""difference a/b""") plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title("""alg_sum""") plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title("""alg_product""") plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title("""bdd_sum""") plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title("""bdd_difference""") plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
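The same set operations can be sanity-checked without scikit-fuzzy; a NumPy-only sketch, where trimf is my re-implementation of a triangular membership function over the same [0, 25, 50] and [25, 50, 75] supports:

import numpy as np


def trimf(x: np.ndarray, a: float, b: float, c: float) -> np.ndarray:
    # Triangular membership: rises on [a, b], falls on [b, c], zero elsewhere.
    return np.clip(np.minimum((x - a) / (b - a), (c - x) / (c - b)), 0.0, 1.0)


x = np.linspace(0, 75, 75)
young = trimf(x, 0, 25, 50)
middle_aged = trimf(x, 25, 50, 75)

union = np.maximum(young, middle_aged)               # max(uA, uB)
intersection = np.minimum(young, middle_aged)        # min(uA, uB)
complement = 1.0 - young                             # 1 - uA
bounded_sum = np.minimum(1.0, young + middle_aged)   # min(1, uA + uB)
bounded_diff = np.maximum(0.0, young - middle_aged)  # max(0, uA - uB)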
325
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
325
1
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): def __init__( self : Dict , *_UpperCAmelCase : int , **_UpperCAmelCase : Tuple ) -> None: """simple docstring""" warnings.warn( 'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use SegformerImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
325
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "layoutlmv3" def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) __lowercase = max_ad_position_embeddings __lowercase = coordinate_size __lowercase = shape_size __lowercase = has_relative_attention_bias __lowercase = rel_pos_bins __lowercase = max_rel_pos __lowercase = has_spatial_attention_bias __lowercase = rel_ad_pos_bins __lowercase = max_rel_ad_pos __lowercase = text_embed __lowercase = visual_embed __lowercase = input_size __lowercase = num_channels __lowercase = patch_size __lowercase = classifier_dropout class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = version.parse("1.12" ) @property def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def a__ ( self : int ) -> float: """simple docstring""" 
return 1e-5 @property def a__ ( self : str ) -> int: """simple docstring""" return 12 def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowercase = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = dict( processor( _UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) ) return inputs
325
1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> None: __lowercase = generate_pascal_triangle(SCREAMING_SNAKE_CASE ) for row_idx in range(SCREAMING_SNAKE_CASE ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=' ' ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=' ' ) else: print(triangle[row_idx][col_idx] , end='' ) print() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[list[int]]: if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) __lowercase = [] for current_row_idx in range(SCREAMING_SNAKE_CASE ): __lowercase = populate_current_row(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) triangle.append(SCREAMING_SNAKE_CASE ) return triangle def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : int ) -> list[int]: __lowercase = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 __lowercase , __lowercase = 1, 1 for current_col_idx in range(1 , SCREAMING_SNAKE_CASE ): calculate_current_element( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return current_row def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , ) -> None: __lowercase = triangle[current_row_idx - 1][current_col_idx - 1] __lowercase = triangle[current_row_idx - 1][current_col_idx] __lowercase = above_to_left_elt + above_to_right_elt def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[list[int]]: if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) __lowercase = [[1]] for row_index in range(1 , SCREAMING_SNAKE_CASE ): __lowercase = [0] + result[-1] + [0] __lowercase = row_index + 1 # Calculate the number of distinct elements in a row __lowercase = sum(divmod(SCREAMING_SNAKE_CASE , 2 ) ) __lowercase = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] __lowercase = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() __lowercase = row_first_half + row_second_half result.append(SCREAMING_SNAKE_CASE ) return result def __SCREAMING_SNAKE_CASE ( ) -> None: from collections.abc import Callable from timeit import timeit def benchmark_a_function(SCREAMING_SNAKE_CASE : Callable , SCREAMING_SNAKE_CASE : int ) -> None: __lowercase = F"""{func.__name__}({value})""" __lowercase = timeit(F"""__main__.{call}""" , setup='import __main__' ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F"""{call:38} -- {timing:.4f} seconds""" ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
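A compact, runnable sketch of the optimized construction above (pascal_triangle is an illustrative name): each new row is the pairwise sum of the previous row padded with zeros.

def pascal_triangle(num_rows: int) -> list[list[int]]:
    triangle = [[1]]
    for _ in range(1, num_rows):
        padded = [0] + triangle[-1] + [0]
        triangle.append([padded[i - 1] + padded[i] for i in range(1, len(padded))])
    return triangle


print(pascal_triangle(5))
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]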
325
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
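The classification head at the end routes its loss by problem type; a standalone sketch of that dispatch (classification_loss is my name, the branches mirror the code above):

import torch
from torch import nn


def classification_loss(
    logits: torch.Tensor, labels: torch.Tensor, num_labels: int, problem_type: str
) -> torch.Tensor:
    if problem_type == "regression":
        loss_fct = nn.MSELoss()
        if num_labels == 1:
            return loss_fct(logits.squeeze(), labels.squeeze())
        return loss_fct(logits, labels)
    if problem_type == "single_label_classification":
        loss_fct = nn.CrossEntropyLoss()
        return loss_fct(logits.view(-1, num_labels), labels.view(-1))
    if problem_type == "multi_label_classification":
        # labels are expected to be float multi-hot vectors here
        loss_fct = nn.BCEWithLogitsLoss()
        return loss_fct(logits, labels)
    raise ValueError(f"unknown problem_type: {problem_type}")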
325
1
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Dict: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match""" __lowercase = nn.Parameter(SCREAMING_SNAKE_CASE ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match""" __lowercase = nn.Parameter(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> Dict: # set torch weights for 1-to-1 comparison __lowercase = np.asarray(weights[0] ) __lowercase = np.asarray(weights[1] ) __lowercase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> str: # set torch weights for 1-to-1 comparison __lowercase = np.asarray(weights[0] ) __lowercase = np.asarray(weights[1] ) __lowercase = np.asarray(weights[2] ) __lowercase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , ) set_param( torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , ) set_param( torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE ) , ) set_param( torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).view(-1 , SCREAMING_SNAKE_CASE ).contiguous().transpose(0 , 1 ) , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any ) -> int: # layernorm 1 __lowercase = weights[0][0][0] __lowercase = np.asarray(layer_norm_a[0] ) __lowercase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , ) # lsh weights + output __lowercase = weights[0][1] if len(SCREAMING_SNAKE_CASE ) < 4: set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE ) else: set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE , torch_block.attention , SCREAMING_SNAKE_CASE ) # intermediate weighs __lowercase = weights[2][0][1][2] # Chunked Feed Forward if len(SCREAMING_SNAKE_CASE ) == 4: __lowercase = intermediate_weights[2] # layernorm 2 __lowercase = np.asarray(intermediate_weights[0][0] ) __lowercase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , ) # intermediate dense __lowercase = np.asarray(intermediate_weights[1][0] ) __lowercase = 
np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , ) # intermediate out __lowercase = np.asarray(intermediate_weights[4][0] ) __lowercase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ) -> List[Any]: # reformer model __lowercase = torch_model.reformer # word embeds __lowercase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE ) , ) if isinstance(weights[3] , SCREAMING_SNAKE_CASE ): __lowercase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __lowercase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F"""{position_embeddings[emb_idx]} emb does not match""" __lowercase = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE ) ) __lowercase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( SCREAMING_SNAKE_CASE ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __lowercase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # output layer norm __lowercase = np.asarray(weights[7][0] ) __lowercase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE ) , torch.tensor(SCREAMING_SNAKE_CASE ) , ) # output embeddings __lowercase = np.asarray(weights[9][0] ) __lowercase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE ) , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> str: # Initialise PyTorch model __lowercase = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE ) print(F"""Building PyTorch model from configuration: {config}""" ) __lowercase = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'rb' ) as f: __lowercase = pickle.load(SCREAMING_SNAKE_CASE )['weights'] set_model_weights_in_torch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , config.hidden_size ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained Reformer model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
325
from __future__ import annotations


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
325
1
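The style-context snippet in the row above is the classic in-place minimal path sum over a grid. Because the obfuscation collapses the parameter name, a de-obfuscated sketch may help; min_path_sum is an illustrative name, not one from the source:

def min_path_sum(matrix: list[list[int]]) -> int:
    # Accumulate costs along the first row and first column, then fill the
    # rest of the grid with the cheaper of the two incoming directions.
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1->3->1->1->1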
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class A__ : def __init__( self : int , _UpperCAmelCase : Any , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = 30 __lowercase = self.seq_length + self.mem_len __lowercase = 15 __lowercase = True __lowercase = True __lowercase = 99 __lowercase = [10, 50, 80] __lowercase = 32 __lowercase = 32 __lowercase = 4 __lowercase = 8 __lowercase = 1_28 __lowercase = 2 __lowercase = 2 __lowercase = None __lowercase = 1 __lowercase = 0 __lowercase = 3 __lowercase = self.vocab_size - 1 __lowercase = 0.01 def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def a__ ( self : Tuple ) -> Dict: """simple docstring""" random.seed(self.seed ) tf.random.set_seed(self.seed ) def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = TFTransfoXLModel(_UpperCAmelCase ) __lowercase , __lowercase = model(_UpperCAmelCase ).to_tuple() __lowercase = {'input_ids': input_ids_a, 'mems': mems_a} __lowercase , __lowercase = model(_UpperCAmelCase ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" __lowercase = TFTransfoXLLMHeadModel(_UpperCAmelCase ) __lowercase , __lowercase = model(_UpperCAmelCase ).to_tuple() __lowercase = {'input_ids': input_ids_a, 'labels': lm_labels} __lowercase , __lowercase = model(_UpperCAmelCase ).to_tuple() __lowercase , __lowercase = model([input_ids_a, mems_a] ).to_tuple() __lowercase = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels} __lowercase , __lowercase = 
model(_UpperCAmelCase ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def a__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = TFTransfoXLForSequenceClassification(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : int ) -> Any: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs __lowercase = {'input_ids': input_ids_a} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Any = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) lowerCAmelCase__ : List[Any] = () if is_tf_available() else () lowerCAmelCase__ : Optional[Any] = ( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : Union[str, Any] = False def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def a__ ( self : str ) -> Dict: """simple docstring""" __lowercase = TFTransfoXLModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , d_embed=37 ) def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Optional[int] ) -> Any: """simple docstring""" self.model_tester.set_seed() __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*_UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" self.model_tester.set_seed() __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*_UpperCAmelCase ) def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __lowercase = model_class(_UpperCAmelCase ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __lowercase = model.get_output_embeddings() assert isinstance(_UpperCAmelCase , tf.keras.layers.Layer ) __lowercase = model.get_bias() assert name is None else: __lowercase = model.get_output_embeddings() assert x is None __lowercase = model.get_bias() assert name is None def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @slow def a__ ( self : int ) -> Any: """simple docstring""" for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFTransfoXLModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' ) def a__ ( self : Dict ) -> str: """simple docstring""" pass @require_tf class A__ ( unittest.TestCase ): @unittest.skip('Skip test until #12651 is resolved.' ) @slow def a__ ( self : str ) -> Any: """simple docstring""" __lowercase = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' ) # fmt: off __lowercase = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __lowercase = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __lowercase = model.generate(_UpperCAmelCase , max_length=2_00 , do_sample=_UpperCAmelCase ) self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
325
import enum
import os
from hashlib import shaaaa
from typing import Optional

from .. import config
from .logging import get_logger


SCREAMING_SNAKE_CASE__ = get_logger(__name__)


class A__ ( enum.Enum ):
    lowerCAmelCase__ : Dict = "all_checks"
    lowerCAmelCase__ : List[Any] = "basic_checks"
    lowerCAmelCase__ : Dict = "no_checks"


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    __lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    __lowercase = ' for ' + verification_name if verification_name is not None else ''
    if len(SCREAMING_SNAKE_CASE ) > 0:
        raise NonMatchingChecksumError(
            F"""Checksums didn't match{for_verification_name}:\n"""
            F"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


class A__ ( lowerCAmelCase__ ):
    pass


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict ) -> Optional[int]:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0:
        raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) )
    __lowercase = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(SCREAMING_SNAKE_CASE ) > 0:
        raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) )
    logger.info('All the splits matched successfully.' )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = True ) -> dict:
    if record_checksum:
        __lowercase = shaaaa()
        with open(SCREAMING_SNAKE_CASE , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(SCREAMING_SNAKE_CASE )
        __lowercase = m.hexdigest()
    else:
        __lowercase = None
    return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum}


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
325
1
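The verification helpers in the row above compare expected against recorded checksum dictionaries, and shaaaa reads as sha256 under the digit-to-'a' renaming. A minimal sketch of the chunked hashing used to record a file's size and checksum; size_and_checksum is an illustrative name:

import hashlib
import os

def size_and_checksum(path: str) -> dict:
    # Stream the file in 1 MiB chunks so large files never load fully into memory.
    m = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            m.update(chunk)
    return {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}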
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


SCREAMING_SNAKE_CASE__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ["""GPTSw3Tokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
import math


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool:
    assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    __lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
    __lowercase = factor * value
    __lowercase = value
    while not is_prime(SCREAMING_SNAKE_CASE ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **SCREAMING_SNAKE_CASE )
    return value
325
1
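The primality test in the row above is plain trial division by odd candidates up to the square root. A de-obfuscated sketch; is_prime is an illustrative name:

import math

def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0:  # negatives, 0, 1 and all even numbers are not
        return False
    # Trial division by odd candidates up to and including floor(sqrt(number)).
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(number % i == 0 for i in odd_numbers)

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]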
from __future__ import annotations

from collections.abc import Iterator


class A__ :
    def __init__( self : List[Any] , _UpperCAmelCase : int ) -> None:
        """simple docstring"""
        __lowercase = value
        __lowercase = None
        __lowercase = None


class A__ :
    def __init__( self : int , _UpperCAmelCase : Node ) -> None:
        """simple docstring"""
        __lowercase = tree

    def a__ ( self : Tuple , _UpperCAmelCase : Node | None ) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self : Union[str, Any] ) -> Iterator[int]:
        """simple docstring"""
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
325
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = 
processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 
SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
1
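The first snippet in the row above sums a binary tree by depth-first recursion: each node contributes its own value plus the sums of both subtrees. A readable equivalent with illustrative names (Node, tree_sum):

from __future__ import annotations

class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None

def tree_sum(node: Node | None) -> int:
    # An empty subtree contributes 0; otherwise recurse into both children.
    if node is None:
        return 0
    return node.value + tree_sum(node.left) + tree_sum(node.right)

root = Node(1)
root.left, root.right = Node(2), Node(3)
assert tree_sum(root) == 6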
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parents[3] / """src""" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) SCREAMING_SNAKE_CASE__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""} SCREAMING_SNAKE_CASE__ = """zero2""" SCREAMING_SNAKE_CASE__ = """zero3""" SCREAMING_SNAKE_CASE__ = [ZEROa, ZEROa] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> Optional[int]: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param __lowercase = parameterized.to_safe_name('_'.join(str(SCREAMING_SNAKE_CASE ) for x in param.args ) ) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test SCREAMING_SNAKE_CASE__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class A__ ( lowerCAmelCase__ ): @parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase ) def a__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" self.run_and_check( stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase ) def a__ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] ) -> Dict: """simple docstring""" self.run_and_check( stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , ) @parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase ) def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ) -> int: """simple docstring""" self.run_and_check( stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , ) @require_torch_multi_gpu @parameterized.expand(_UpperCAmelCase , name_func=_UpperCAmelCase ) def a__ ( self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" self.run_and_check( stage=_UpperCAmelCase , model=_UpperCAmelCase , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , ) def a__ ( self : List[str] , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" pass def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> List[str]: """simple docstring""" __lowercase = models[model] 
__lowercase = self.run_trainer( stage=_UpperCAmelCase , model_name=_UpperCAmelCase , eval_steps=_UpperCAmelCase , num_train_epochs=1 , distributed=_UpperCAmelCase , fpaa=_UpperCAmelCase , ) self.do_checks(_UpperCAmelCase ) return output_dir def a__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , ) -> List[Any]: """simple docstring""" __lowercase = self.get_auto_remove_tmp_dir('./xxx' , after=_UpperCAmelCase ) __lowercase = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(_UpperCAmelCase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(['--fp16'] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files __lowercase = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() __lowercase = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] __lowercase = self.get_launcher(_UpperCAmelCase ) __lowercase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_UpperCAmelCase , env=self.get_env() ) return output_dir def a__ ( self : str , _UpperCAmelCase : List[Any]=False ) -> Tuple: """simple docstring""" __lowercase = min(2 , get_gpu_count() ) if distributed else 1 return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
325
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


SCREAMING_SNAKE_CASE__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
1
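The __init__ snippet in the row above (like the GPTSw3 one earlier) follows the transformers lazy-import pattern: a dict mapping submodule names to exported symbols is only populated when the optional dependency is present, then handed to _LazyModule so submodules load on first attribute access. A minimal runnable sketch of the availability gate, with illustrative names; the find_spec probe stands in for the library's own availability helpers:

import importlib.util

def is_sentencepiece_available() -> bool:
    # The module counts as available if Python can locate it without importing it.
    return importlib.util.find_spec("sentencepiece") is not None

_import_structure: dict = {}

if is_sentencepiece_available():
    # Only advertise the tokenizer when its optional dependency is installed,
    # so importing the package never fails on a missing extra.
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

print(_import_structure)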
from PIL import Image


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Image , SCREAMING_SNAKE_CASE : int ) -> Image:
    __lowercase = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(SCREAMING_SNAKE_CASE : int ) -> int:
        return int(128 + factor * (c - 128) )

    return img.point(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change contrast to 170
        SCREAMING_SNAKE_CASE__ = change_contrast(img, 170)
        cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
325
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
    lowerCAmelCase__ : int = ["mems"]
    lowerCAmelCase__ : Dict = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple:
        """simple docstring"""
        __lowercase = vocab_size
        __lowercase = []
        self.cutoffs.extend(_UpperCAmelCase )
        if proj_share_all_but_first:
            __lowercase = [False] + [True] * len(self.cutoffs )
        else:
            __lowercase = [False] + [False] * len(self.cutoffs )
        __lowercase = d_model
        __lowercase = d_embed
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = div_val
        __lowercase = pre_lnorm
        __lowercase = n_layer
        __lowercase = n_head
        __lowercase = mem_len
        __lowercase = same_length
        __lowercase = attn_type
        __lowercase = clamp_len
        __lowercase = sample_softmax
        __lowercase = adaptive
        __lowercase = dropout
        __lowercase = dropatt
        __lowercase = untie_r
        __lowercase = init
        __lowercase = init_range
        __lowercase = proj_init_std
        __lowercase = init_std
        __lowercase = layer_norm_epsilon
        super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a__ ( self : Tuple ) -> Any:
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
        """simple docstring"""
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
325
1
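The contrast snippet in the row above applies the standard correction factor F = 259(level + 255) / (255(259 - level)) to every channel value, re-centered around mid-gray. A worked sketch with illustrative names; the explicit clamp is an addition here, since the original relies on PIL's point() lookup table:

def contrast_factor(level: int) -> float:
    # Standard contrast correction factor; level ranges over roughly [-255, 255].
    return (259 * (level + 255)) / (255 * (259 - level))

def adjust(c: int, level: int) -> int:
    # Re-center the channel value around mid-gray (128), scale, then clamp to 8 bits.
    return max(0, min(255, int(128 + contrast_factor(level) * (c - 128))))

assert adjust(128, 170) == 128   # mid-gray is a fixed point
assert adjust(200, 170) == 255   # factor ~4.85 pushes bright pixels to white
assert adjust(60, 170) == 0      # and dark pixels toward black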
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


SCREAMING_SNAKE_CASE__ = {
    """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
    """tokenization_luke""": ["""LukeTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LukeForEntityClassification""",
        """LukeForEntityPairClassification""",
        """LukeForEntitySpanClassification""",
        """LukeForMultipleChoice""",
        """LukeForQuestionAnswering""",
        """LukeForSequenceClassification""",
        """LukeForTokenClassification""",
        """LukeForMaskedLM""",
        """LukeModel""",
        """LukePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: for attribute in key.split('.' ): __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __lowercase = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase = value elif weight_type == "weight_g": __lowercase = value elif weight_type == "weight_v": __lowercase = value elif weight_type == "bias": __lowercase = value else: __lowercase = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __lowercase = [] __lowercase = fairseq_model.state_dict() __lowercase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __lowercase = None for name, value in fairseq_dict.items(): __lowercase = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __lowercase = True elif name.split('.' )[0] == "proj": __lowercase = fairseq_model.proj __lowercase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowercase = True if "*" in mapped_key: __lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __lowercase = 'weight_g' elif "weight_v" in name: __lowercase = 'weight_v' elif "bias" in name: __lowercase = 'bias' elif "weight" in name: __lowercase = 'weight' else: __lowercase = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: __lowercase = full_name.split('conv_layers.' )[-1] __lowercase = name.split('.' ) __lowercase = int(items[0] ) __lowercase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) __lowercase = emb.weight.data return lin_layer def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __lowercase = f.readlines() __lowercase = [line.split(' ' )[0] for line in lines] __lowercase = len(SCREAMING_SNAKE_CASE ) __lowercase = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]: __lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaConfig.from_pretrained( SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE ) __lowercase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __lowercase = model[0].eval() # set weights for wav2vec2 encoder __lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE ) __lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE ) __lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __lowercase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) __lowercase = False # add projection layer __lowercase = nn.Parameter(projection_layer.weight ) __lowercase = nn.Parameter(projection_layer.bias ) __lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE ) with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) __lowercase 
= hf_wavavec.config.to_dict() __lowercase = tokenizer.pad_token_id __lowercase = tokenizer.bos_token_id __lowercase = tokenizer.eos_token_id __lowercase = 'speech_to_text_2' __lowercase = 'wav2vec2' __lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
325
1
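The conversion script in the row above renames fairseq weights onto HF module paths, using '*' in the mapping as a placeholder for the layer index parsed out of the original key. A minimal sketch of that substitution, with an illustrative one-entry mapping:

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def map_key(name: str) -> str | None:
    # Replace the "*" placeholder with the layer index taken from the original
    # fairseq name, e.g. "encoder.layers.7.self_attn.k_proj.weight" -> "7".
    for key, mapped_key in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return None

assert map_key("encoder.layers.7.self_attn.k_proj.weight") == "encoder.layers.7.attention.k_proj"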
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple: __lowercase = tmp_path / 'file.csv' __lowercase = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20\n ' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE ) return str(SCREAMING_SNAKE_CASE ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> List[Any]: __lowercase = tmp_path / 'malformed_file.csv' __lowercase = textwrap.dedent( '\\n header1,header2\n 1,2\n 10,20,\n ' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE ) return str(SCREAMING_SNAKE_CASE ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int ) -> List[str]: __lowercase = tmp_path / 'csv_with_image.csv' __lowercase = textwrap.dedent( F"""\ image {image_file} """ ) with open(SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE ) return str(SCREAMING_SNAKE_CASE ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Any: __lowercase = tmp_path / 'csv_with_label.csv' __lowercase = textwrap.dedent( '\\n label\n good\n bad\n good\n ' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE ) return str(SCREAMING_SNAKE_CASE ) @pytest.fixture def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> str: __lowercase = tmp_path / 'csv_with_int_list.csv' __lowercase = textwrap.dedent( '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as f: f.write(SCREAMING_SNAKE_CASE ) return str(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ) -> int: __lowercase = Csv() __lowercase = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(SCREAMING_SNAKE_CASE , match='Error tokenizing data' ): for _ in generator: pass assert any( record.levelname == 'ERROR' and 'Failed to read file' in record.message and os.path.basename(SCREAMING_SNAKE_CASE ) in record.message for record in caplog.records ) @require_pil def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __lowercase = f.read().splitlines()[1] __lowercase = Csv(encoding='utf-8' , features=Features({'image': Image()} ) ) __lowercase = csv._generate_tables([[csv_file_with_image]] ) __lowercase = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('image' ).type == Image()() __lowercase = pa_table.to_pydict()['image'] assert generated_content == [{"path": image_file, "bytes": None}] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: with open(SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __lowercase = f.read().splitlines()[1:] __lowercase = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) ) __lowercase = csv._generate_tables([[csv_file_with_label]] ) __lowercase = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )() __lowercase = pa_table.to_pydict()['label'] assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(SCREAMING_SNAKE_CASE ) for label in labels] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict 
) -> Optional[Any]: __lowercase = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda SCREAMING_SNAKE_CASE : [int(SCREAMING_SNAKE_CASE ) for i in x.split()]} ) __lowercase = csv._generate_tables([[csv_file_with_int_list]] ) __lowercase = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field('int_list' ).type ) __lowercase = pa_table.to_pydict()['int_list'] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
325
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
    __lowercase = [0 for i in range(r + 1 )]
    # nc0 = 1
    __lowercase = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        __lowercase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
325
1
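The second snippet in the row above computes C(n, r) by building a single row of Pascal's triangle in place. A de-obfuscated sketch; binomial_coefficient here is an illustrative reconstruction:

def binomial_coefficient(n: int, r: int) -> int:
    # Update the row right to left so each entry still holds the previous
    # row's value when it is read: C(i, j) = C(i-1, j) + C(i-1, j-1).
    c = [0] * (r + 1)
    c[0] = 1  # C(i, 0) == 1 for every i
    for i in range(1, n + 1):
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

assert binomial_coefficient(n=10, r=5) == 252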
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format; everything except embeddings is transposed
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    converted_torch_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted_torch_dict)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE: the attribute names below are assumptions; the original assignment
        # targets were lost in the source, which only preserved the values 4096 and True.
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use the large model configuration.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert the VQA variant of the checkpoint.")

    args = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
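To make the renaming pass above concrete, here is a minimal standalone sketch of its two core transformations: the `layers_N` to `layer.N` regex rewrite, and transposing kernel matrices when going from Flax's (in, out) layout to PyTorch's (out, in). The sample key and array shapes are invented for illustration.

# Demo of the key rename and the weight transpose used by the converter above.
import re

import numpy as np
import torch

flax_key = "encoder.layers_3.attention.query.kernel"  # hypothetical checkpoint key
renamed = re.sub(r"layers_(\d+)", r"layer.\1", flax_key).replace("kernel", "weight")
print(renamed)  # encoder.layer.3.attention.query.weight

flax_kernel = np.ones((16, 8), dtype=np.float32)   # (in_features, out_features)
torch_weight = torch.from_numpy(flax_kernel.T)     # (out_features, in_features)
print(torch_weight.shape)  # torch.Size([8, 16])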
325
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = ["vqvae"] def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase ) def a__ ( self : Tuple ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00 @torch.no_grad() def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" __lowercase = steps or self.get_default_steps() self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __lowercase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __lowercase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_UpperCAmelCase , device=self.device , ) __lowercase = noise __lowercase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase ) __lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) __lowercase = (input_image / 2_55) * 2 - 1 __lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample( generator=_UpperCAmelCase )[0] __lowercase = self.vqvae.config.scaling_factor * input_images if start_step > 0: __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] ) __lowercase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __lowercase = int(mask_start_secs * pixels_per_second ) __lowercase = int(mask_end_secs * pixels_per_second ) __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _UpperCAmelCase ): __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample'] else: __lowercase = self.unet(_UpperCAmelCase , 
_UpperCAmelCase )['sample'] if isinstance(self.scheduler , _UpperCAmelCase ): __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] else: __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] if mask is not None: if mask_start > 0: __lowercase = mask[:, step, :, :mask_start] if mask_end > 0: __lowercase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __lowercase = 1 / self.vqvae.config.scaling_factor * images __lowercase = self.vqvae.decode(_UpperCAmelCase )['sample'] __lowercase = (images / 2 + 0.5).clamp(0 , 1 ) __lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __lowercase = (images * 2_55).round().astype('uint8' ) __lowercase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) ) __lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) ) @torch.no_grad() def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler , _UpperCAmelCase ) self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) __lowercase = (sample / 2_55) * 2 - 1 __lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __lowercase = self.scheduler.alphas_cumprod[t] __lowercase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __lowercase = 1 - alpha_prod_t __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample'] __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output __lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor: """simple docstring""" __lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
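The pipeline's final static method is a spherical interpolation (slerp) between two noise tensors. Below is a standalone sketch of the same formula on plain tensors, with illustrative shapes; unlike a linear mix, it keeps the interpolant's norm roughly constant, which matters when blending Gaussian latents.

# Standalone slerp sketch mirroring the pipeline's static method above.
from math import acos, sin

import torch

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened tensors, then interpolate along the arc
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)

a = torch.randn(1, 1, 256, 64)  # illustrative latent shape
b = torch.randn(1, 1, 256, 64)
halfway = slerp(a, b, 0.5)
print(halfway.shape)  # torch.Size([1, 1, 256, 64])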
325
1
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    # each pass places the largest remaining element at the end,
    # so the recursive call can ignore the last position
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
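A quick usage sketch for the recursive bubble sort above, with expected outputs shown in comments; the recursion terminates as soon as a full pass makes no swaps.

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
print(bubble_sort([]))               # []
print(bubble_sort([3, 3, 1]))        # [1, 3, 3]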
325
from __future__ import annotations

# This is the precision for this function, which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# Linear search used once the search window has become smaller than `precision`.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
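A short usage sketch for the two ternary searches above. Both assume a sorted list; once the window shrinks below `precision` they fall back to a linear scan, which is the only path this small demo exercises.

data = [1, 3, 5, 7, 9]  # sorted, shorter than `precision`
print(ite_ternary_search(data, 5))                    # 2
print(rec_ternary_search(0, len(data) - 1, data, 5))  # 2
print(ite_ternary_search(data, 4))                    # -1 (not present)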
325
1
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : def __init__( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : int=True , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=2_24 , _UpperCAmelCase : Union[str, Any]=10_00 , _UpperCAmelCase : Union[str, Any]=[3, 3, 6, 4] , _UpperCAmelCase : Any=[48, 56, 1_12, 2_20] , ) -> List[str]: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = is_training __lowercase = use_labels __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = num_labels __lowercase = image_size __lowercase = layer_depths __lowercase = embed_dims def a__ ( self : str ) -> str: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='gelu' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_UpperCAmelCase , layer_scale_init_value=1e-5 , ) def a__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase = SwiftFormerModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def a__ ( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.num_labels __lowercase = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) __lowercase = SwiftFormerForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : str ) -> str: """simple docstring""" ((__lowercase) , 
(__lowercase) , (__lowercase)) = self.prepare_config_and_inputs() __lowercase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : str = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () lowerCAmelCase__ : Union[str, Any] = ( {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Union[str, Any] = False def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = SwiftFormerModelTester(self ) __lowercase = ConfigTester( self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def a__ ( self : int ) -> str: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='SwiftFormer does not use inputs_embeds' ) def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" pass def a__ ( self : int ) -> Dict: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_UpperCAmelCase ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : Dict ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = SwiftFormerModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason='SwiftFormer does not output attentions' ) def a__ ( self : Dict ) -> Optional[int]: """simple docstring""" pass def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ): __lowercase = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = outputs.hidden_states __lowercase = 8 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(_UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ 
self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" def _config_zero_init(_UpperCAmelCase : Optional[int] ): __lowercase = copy.deepcopy(_UpperCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(_UpperCAmelCase , _UpperCAmelCase , 1e-1_0 ) if isinstance(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase ): __lowercase = _config_zero_init(getattr(_UpperCAmelCase , _UpperCAmelCase ) ) setattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return configs_no_init __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def a__ ( self : Dict ) -> Tuple: """simple docstring""" pass def __SCREAMING_SNAKE_CASE ( ) -> int: __lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def a__ ( self : str ) -> Optional[int]: """simple docstring""" return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs' ) if is_vision_available() else None @slow def a__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __lowercase = SwiftFormerForImageClassification.from_pretrained('MBZUAI/swiftformer-xs' ).to(_UpperCAmelCase ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __lowercase = model(**_UpperCAmelCase ) # verify the logits __lowercase = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __lowercase = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
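The shape assertion in the hidden-states test above encodes SwiftFormer's feature pyramid: the patch-embedding stem downsamples by 4, and the spatial side halves after every two of the eight recorded stages while the channel width follows `embed_dims`. A sketch of that arithmetic for the tester's default 224-pixel input:

# Expected hidden-state shapes implied by the assertion in the test above.
image_size = 224
embed_dims = [48, 56, 112, 220]
for i in range(8):  # the test expects 8 hidden states
    side = (image_size // 4) // 2 ** (i // 2)
    print(f"hidden_state[{i}]: (batch, {embed_dims[i // 2]}, {side}, {side})")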
325
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = module __lowercase = nn.Sequential( nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , ) __lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ : int = "bigscience/bloom-1b7" # Constant values lowerCAmelCase__ : Any = 2.109659552692574 lowerCAmelCase__ : str = "Hello my name is" lowerCAmelCase__ : Any = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) lowerCAmelCase__ : List[Any] = 10 def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" super().setUp() # Models and tokenizer __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def a__ ( self : str ) -> int: """simple docstring""" __lowercase = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) ) __lowercase = config.to_dict() __lowercase = config.to_diff_dict() __lowercase = config.to_json_string() def a__ ( self : Dict ) -> Tuple: """simple docstring""" from bitsandbytes.nn import Paramsabit __lowercase = self.model_fpaa.get_memory_footprint() __lowercase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowercase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def a__ ( self : Tuple ) -> str: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def a__ ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = BitsAndBytesConfig() __lowercase = True __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : str ) -> List[str]: """simple docstring""" with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" with self.assertRaises(_UpperCAmelCase ): # Tries with `str` 
self.model_abit.to('cpu' ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_fpaa.to(torch.floataa ) __lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowercase = self.model_fpaa.to('cpu' ) # Check this does not throw an error __lowercase = self.model_fpaa.half() # Check this does not throw an error __lowercase = self.model_fpaa.float() def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): @classmethod def a__ ( cls : int ) -> Tuple: """simple docstring""" __lowercase = 't5-small' __lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __lowercase = AutoTokenizer.from_pretrained(cls.model_name ) __lowercase = 'Translate in German: Hello, my dog is cute' def a__ ( self : List[Any] ) -> Dict: """simple docstring""" gc.collect() torch.cuda.empty_cache() def a__ ( self : int ) -> int: """simple docstring""" from transformers import TaForConditionalGeneration __lowercase = TaForConditionalGeneration._keep_in_fpaa_modules __lowercase = None # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) __lowercase = modules def a__ ( self : str ) -> Optional[Any]: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" super().setUp() # model_name __lowercase = 'bigscience/bloom-560m' __lowercase = 't5-small' # 
Different types of model __lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Sequence classification model __lowercase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # CausalLM model __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Seq2seq model __lowercase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : int ) -> List[str]: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> str: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( lowerCAmelCase__ ): def a__ ( self : str ) -> str: """simple docstring""" super().setUp() def a__ ( self : Dict ) -> Any: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> int: """simple docstring""" __lowercase = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowercase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().setUp() def a__ ( self : List[Any] ) -> int: """simple docstring""" __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class A__ ( lowerCAmelCase__ ): def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = 'facebook/opt-350m' super().setUp() def a__ ( self : Dict ) -> List[str]: """simple docstring""" if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowercase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __lowercase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): __lowercase = LoRALayer(module.q_proj , rank=16 ) __lowercase = LoRALayer(module.k_proj , rank=16 ) __lowercase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowercase = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Any = "gpt2-xl" lowerCAmelCase__ : str = 3.3191854854152187
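The training test above exercises the standard adapter recipe: freeze every base parameter and learn only a low-rank bottleneck added to a linear layer's output. Here is a minimal self-contained sketch of that pattern; dimensions are illustrative and the base layer is left unquantized for simplicity.

# Frozen base linear + trainable low-rank adapter, as in the LoRALayer class above.
import torch
import torch.nn as nn

base = nn.Linear(64, 64)
base.weight.requires_grad_(False)
base.bias.requires_grad_(False)

rank = 16
adapter = nn.Sequential(nn.Linear(64, rank, bias=False), nn.Linear(rank, 64, bias=False))
nn.init.zeros_(adapter[1].weight)  # start as a no-op, matching the class above

x = torch.randn(2, 64)
out = base(x) + adapter(x)
out.sum().backward()
# Only the adapter receives gradients; the frozen base does not.
print(base.weight.grad is None, adapter[0].weight.grad is not None)  # True True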
325
1
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self : Optional[Any] , _UpperCAmelCase : int = 1_28 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : float = 2_000.0 , _UpperCAmelCase : int = 7_68 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : float = 0.1 , ) -> List[str]: """simple docstring""" super().__init__() __lowercase = nn.Sequential( nn.Linear(_UpperCAmelCase , d_model * 4 , bias=_UpperCAmelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_UpperCAmelCase ) , nn.SiLU() , ) __lowercase = nn.Embedding(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = False __lowercase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.Dropout(p=_UpperCAmelCase ) __lowercase = nn.ModuleList() for lyr_num in range(_UpperCAmelCase ): # FiLM conditional T5 decoder __lowercase = DecoderLayer(d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase ) self.decoders.append(_UpperCAmelCase ) __lowercase = TaLayerNorm(_UpperCAmelCase ) __lowercase = nn.Dropout(p=_UpperCAmelCase ) __lowercase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) def a__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" __lowercase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def a__ ( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> List[Any]: """simple docstring""" __lowercase , __lowercase , __lowercase = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __lowercase = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __lowercase = self.conditioning_emb(_UpperCAmelCase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __lowercase = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __lowercase = torch.broadcast_to( torch.arange(_UpperCAmelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __lowercase = self.position_encoding(_UpperCAmelCase ) __lowercase = self.continuous_inputs_projection(_UpperCAmelCase ) inputs += position_encodings __lowercase = self.dropout(_UpperCAmelCase ) # decoder: No padding present. __lowercase = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__lowercase = [(x, self.encoder_decoder_mask(_UpperCAmelCase , _UpperCAmelCase )) for x, y in encodings_and_masks] # cross attend style: concat encodings __lowercase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __lowercase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __lowercase = lyr( _UpperCAmelCase , conditioning_emb=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )[0] __lowercase = self.decoder_norm(_UpperCAmelCase ) __lowercase = self.post_dropout(_UpperCAmelCase ) __lowercase = self.spec_out(_UpperCAmelCase ) return spec_out class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Dict=1e-6 ) -> Union[str, Any]: """simple docstring""" super().__init__() __lowercase = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , dropout_rate=_UpperCAmelCase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=_UpperCAmelCase , d_kv=_UpperCAmelCase , num_heads=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase , layer_norm_epsilon=_UpperCAmelCase ) ) def a__ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[int]=None , ) -> List[Any]: """simple docstring""" __lowercase = self.layer[0]( _UpperCAmelCase , conditioning_emb=_UpperCAmelCase , attention_mask=_UpperCAmelCase , ) if encoder_hidden_states is not None: __lowercase = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to( encoder_hidden_states.dtype ) __lowercase = self.layer[1]( _UpperCAmelCase , key_value_states=_UpperCAmelCase , attention_mask=_UpperCAmelCase , ) # Apply Film Conditional Feed Forward layer __lowercase = self.layer[-1](_UpperCAmelCase , _UpperCAmelCase ) return (hidden_states,) class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Any: """simple docstring""" super().__init__() __lowercase = TaLayerNorm(_UpperCAmelCase ) __lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCAmelCase ) __lowercase = Attention(query_dim=_UpperCAmelCase , heads=_UpperCAmelCase , dim_head=_UpperCAmelCase , out_bias=_UpperCAmelCase , scale_qk=_UpperCAmelCase ) __lowercase = nn.Dropout(_UpperCAmelCase ) def a__ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : int=None , ) -> Tuple: """simple docstring""" __lowercase = self.layer_norm(_UpperCAmelCase ) if conditioning_emb is not None: __lowercase = self.FiLMLayer(_UpperCAmelCase , _UpperCAmelCase ) # Self-attention block __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_states + self.dropout(_UpperCAmelCase ) return hidden_states class A__ ( nn.Module ): def __init__( self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , 
_UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = Attention(query_dim=_UpperCAmelCase , heads=_UpperCAmelCase , dim_head=_UpperCAmelCase , out_bias=_UpperCAmelCase , scale_qk=_UpperCAmelCase ) __lowercase = TaLayerNorm(_UpperCAmelCase , eps=_UpperCAmelCase ) __lowercase = nn.Dropout(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Any , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None , ) -> Any: """simple docstring""" __lowercase = self.layer_norm(_UpperCAmelCase ) __lowercase = self.attention( _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , attention_mask=attention_mask.squeeze(1 ) , ) __lowercase = hidden_states + self.dropout(_UpperCAmelCase ) return layer_output class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ) -> str: """simple docstring""" super().__init__() __lowercase = TaDenseGatedActDense(d_model=_UpperCAmelCase , d_ff=_UpperCAmelCase , dropout_rate=_UpperCAmelCase ) __lowercase = TaFiLMLayer(in_features=d_model * 4 , out_features=_UpperCAmelCase ) __lowercase = TaLayerNorm(_UpperCAmelCase , eps=_UpperCAmelCase ) __lowercase = nn.Dropout(_UpperCAmelCase ) def a__ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=None ) -> Any: """simple docstring""" __lowercase = self.layer_norm(_UpperCAmelCase ) if conditioning_emb is not None: __lowercase = self.film(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.DenseReluDense(_UpperCAmelCase ) __lowercase = hidden_states + self.dropout(_UpperCAmelCase ) return hidden_states class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> List[str]: """simple docstring""" super().__init__() __lowercase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.Dropout(_UpperCAmelCase ) __lowercase = NewGELUActivation() def a__ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.act(self.wi_a(_UpperCAmelCase ) ) __lowercase = self.wi_a(_UpperCAmelCase ) __lowercase = hidden_gelu * hidden_linear __lowercase = self.dropout(_UpperCAmelCase ) __lowercase = self.wo(_UpperCAmelCase ) return hidden_states class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int=1e-6 ) -> str: """simple docstring""" super().__init__() __lowercase = nn.Parameter(torch.ones(_UpperCAmelCase ) ) __lowercase = eps def a__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowercase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_UpperCAmelCase ) __lowercase = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __lowercase = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class A__ ( nn.Module ): def a__ ( self : List[Any] , _UpperCAmelCase : torch.Tensor ) -> torch.Tensor: """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * 
torch.pow(_UpperCAmelCase , 3.0 )) )) class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ) -> int: """simple docstring""" super().__init__() __lowercase = nn.Linear(_UpperCAmelCase , out_features * 2 , bias=_UpperCAmelCase ) def a__ ( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> List[Any]: """simple docstring""" __lowercase = self.scale_bias(_UpperCAmelCase ) __lowercase , __lowercase = torch.chunk(_UpperCAmelCase , 2 , -1 ) __lowercase = x * (1 + scale) + shift return x
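The FiLM layer defined at the end of the block above conditions activations with a per-channel affine transform predicted from an embedding, `x * (1 + scale) + shift`. A standalone sketch with illustrative sizes:

# FiLM conditioning in isolation, mirroring the scale/shift split above.
import torch
import torch.nn as nn

d_model, cond_dim = 8, 32
scale_bias = nn.Linear(cond_dim, d_model * 2, bias=False)

x = torch.randn(2, 10, d_model)              # (batch, seq, d_model)
conditioning_emb = torch.randn(2, 1, cond_dim)
scale, shift = torch.chunk(scale_bias(conditioning_emb), 2, dim=-1)
x = x * (1 + scale) + shift                  # broadcasts over the sequence axis
print(x.shape)  # torch.Size([2, 10, 8])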
325
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = 'gelu' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = 
TFConvBertModel(config=_UpperCAmelCase ) __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase__ : List[str] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : List[str] = False def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : int ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(_UpperCAmelCase , 'use_cache' ): __lowercase = True __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = model_class(_UpperCAmelCase ) __lowercase = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' ) __lowercase = tf.keras.models.load_model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = outputs['encoder_hidden_states'] __lowercase = outputs['encoder_attentions'] else: __lowercase = outputs['hidden_states'] __lowercase = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase : int ): __lowercase = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always 
last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
325
1
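The TFConvBert test code above exercises saved-model export and the shapes of hidden states and attentions. A minimal usage sketch of the same model, separate from the dataset row, might look like the following; it assumes `transformers` and TensorFlow are installed and that the 'YituTech/conv-bert-base' weights can be downloaded.

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])  # toy batch with one short sequence

outputs = model(input_ids, output_hidden_states=True, output_attentions=True)

# last_hidden_state has shape (batch_size, seq_len, hidden_size) == (1, 6, 768),
# matching the integration test above
print(outputs.last_hidden_state.shape)

# one hidden-state tensor per layer plus the embeddings, one attention tensor per
# layer; ConvBert halves the attention heads, which is why the tests above compare
# against num_attention_heads / 2
print(len(outputs.hidden_states), len(outputs.attentions), outputs.attentions[0].shape)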
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : torch.FloatTensor lowerCAmelCase__ : Optional[torch.FloatTensor] = None def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str]=0.999 , SCREAMING_SNAKE_CASE : List[str]="cosine" , ) -> List[Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(SCREAMING_SNAKE_CASE : int ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(SCREAMING_SNAKE_CASE : Any ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __lowercase = [] for i in range(SCREAMING_SNAKE_CASE ): __lowercase = i / num_diffusion_timesteps __lowercase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE ) / alpha_bar_fn(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) ) return torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class A__ ( lowerCAmelCase__ , lowerCAmelCase__ ): lowerCAmelCase__ : str = 1 @register_to_config def __init__( self : List[str] , _UpperCAmelCase : int = 10_00 , _UpperCAmelCase : float = 0.0_001 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : str = "linear" , _UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : int = 0 , _UpperCAmelCase : str = "epsilon" , _UpperCAmelCase : float = 1.0 , **_UpperCAmelCase : Dict , ) -> int: """simple docstring""" if kwargs.get('set_alpha_to_one' , _UpperCAmelCase ) is not None: __lowercase = ( 'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.' ) deprecate('set_alpha_to_one' , '1.0.0' , _UpperCAmelCase , standard_warn=_UpperCAmelCase ) __lowercase = kwargs['set_alpha_to_one'] if trained_betas is not None: __lowercase = torch.tensor(_UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "linear": __lowercase = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __lowercase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __lowercase = betas_for_alpha_bar(_UpperCAmelCase ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) __lowercase = 1.0 - self.betas __lowercase = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
__lowercase = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution __lowercase = 1.0 # setable values __lowercase = None __lowercase = torch.from_numpy(np.arange(0 , _UpperCAmelCase ).copy().astype(np.intaa ) ) def a__ ( self : List[str] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor: """simple docstring""" return sample def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None ) -> int: """simple docstring""" if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:""" f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle""" f""" maximal {self.config.num_train_timesteps} timesteps.""" ) __lowercase = num_inference_steps __lowercase = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __lowercase = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round().copy().astype(np.intaa ) __lowercase = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase ) self.timesteps += self.config.steps_offset def a__ ( self : List[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: """simple docstring""" __lowercase = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __lowercase = self.alphas_cumprod[timestep] __lowercase = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __lowercase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __lowercase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __lowercase = model_output elif self.config.prediction_type == "sample": __lowercase = model_output __lowercase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __lowercase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __lowercase = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or""" ' `v_prediction`' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __lowercase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __lowercase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase ) def __len__( self ) -> int: """Return the number of training timesteps the scheduler was configured with.""" return self.config.num_train_timesteps
325
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class A__ : def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" __lowercase = scheduler __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers] __lowercase = split_batches __lowercase = step_with_optimizer __lowercase = GradientState() def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __lowercase = AcceleratorState().num_processes for _ in range(_UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_last_lr() def a__ ( self : List[str] ) -> Tuple: """simple docstring""" return self.scheduler.state_dict() def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.scheduler.load_state_dict(_UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" return self.scheduler.get_lr() def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any: """simple docstring""" return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
325
1
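The scheduler classes above (an inverse-DDIM scheduler and accelerate's scheduler wrapper) are both driven by a simple step loop. Below is a hedged sketch of that loop for the inverse scheduler, assuming the `diffusers` package is available; `fake_unet` is a stand-in for a trained noise-prediction network, not a real model.

import torch
from diffusers import DDIMInverseScheduler

scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 32, 32)  # latents being inverted toward noise

def fake_unet(x, t):
    # placeholder (assumption): a real pipeline would call its UNet here
    return torch.randn_like(x)

for t in scheduler.timesteps:
    model_output = fake_unet(sample, t)
    # step() returns a DDIMSchedulerOutput; prev_sample is the next latent
    sample = scheduler.step(model_output, t, sample).prev_sample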
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('socket.socket' ) @patch('builtins.open' ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: # ===== initialization ===== __lowercase = Mock() __lowercase = conn, Mock() __lowercase = iter([1, None] ) __lowercase = lambda SCREAMING_SNAKE_CASE : next(SCREAMING_SNAKE_CASE ) # ===== invoke ===== send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
325
import collections import importlib.util import os import re from pathlib import Path SCREAMING_SNAKE_CASE__ = """src/transformers""" # Matches is_xxx_available() SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""") # Catches a line with else: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""") def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None: return None __lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.readlines() __lowercase = 0 while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure __lowercase = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowercase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ): __lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0] __lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: __lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowercase = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __lowercase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowercase = [] while ( line_index < len(SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowercase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int: def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowercase = [] for key in import_dict_objects.keys(): __lowercase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __lowercase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowercase = 'base imports' if key == 'none' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ) -> Tuple: __lowercase = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) __lowercase = parse_init(SCREAMING_SNAKE_CASE ) if objects is not None: __lowercase = analyze_results(*SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('\n'.join(SCREAMING_SNAKE_CASE ) ) if len(SCREAMING_SNAKE_CASE ) > 0: raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0: continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace(os.path.sep , '.' ) submodules.append(SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' 
) if len(submodule.split('.' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE ) return submodules SCREAMING_SNAKE_CASE__ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def __SCREAMING_SNAKE_CASE ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. __lowercase = importlib.util.spec_from_file_location( 'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) __lowercase = spec.loader.load_module() __lowercase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F"""{list_of_modules}\n""" 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
325
1
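The `send_file` test above follows a common `unittest.mock` recipe: patch the collaborators, run the code under test, then assert on how the mocks were used. A self-contained, standard-library-only illustration of the same recipe:

from unittest.mock import patch

def read_first_line(path):
    with open(path) as f:
        return f.readline()

@patch("builtins.open")
def test_read_first_line(mock_open):
    # configure what the mocked context manager hands back
    mock_open.return_value.__enter__.return_value.readline.return_value = "hello\n"
    assert read_first_line("ignored.txt") == "hello\n"
    mock_open.assert_called_once_with("ignored.txt")

test_read_first_line()  # the @patch decorator injects the mock when called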
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """spiece.model"""} SCREAMING_SNAKE_CASE__ = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } SCREAMING_SNAKE_CASE__ = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } SCREAMING_SNAKE_CASE__ = """▁""" class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = VOCAB_FILES_NAMES lowerCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[str]="[CLS]" , _UpperCAmelCase : List[Any]="[SEP]" , _UpperCAmelCase : Optional[int]="<unk>" , _UpperCAmelCase : Optional[int]="[SEP]" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : Union[str, Any]="[CLS]" , _UpperCAmelCase : Dict="[MASK]" , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Optional[Any] , ) -> None: """simple docstring""" __lowercase = ( AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase , normalized=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token ) __lowercase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __lowercase = do_lower_case __lowercase = remove_space __lowercase = keep_accents __lowercase = vocab_file __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) @property def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" return len(self.sp_model ) def a__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = self.__dict__.copy() __lowercase = None return state def __setstate__( 
self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowercase = {} __lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def a__ ( self : int , _UpperCAmelCase : str ) -> List[str]: """simple docstring""" if self.remove_space: __lowercase = ' '.join(inputs.strip().split() ) else: __lowercase = inputs __lowercase = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: __lowercase = unicodedata.normalize('NFKD' , _UpperCAmelCase ) __lowercase = ''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: __lowercase = outputs.lower() return outputs def a__ ( self : List[str] , _UpperCAmelCase : str ) -> List[str]: """simple docstring""" __lowercase = self.preprocess_text(_UpperCAmelCase ) __lowercase = self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) __lowercase = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): __lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: __lowercase = cur_pieces[1:] else: __lowercase = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def a__ ( self : List[str] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" return self.sp_model.PieceToId(_UpperCAmelCase ) def a__ ( self : str , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" return self.sp_model.IdToPiece(_UpperCAmelCase ) def a__ ( self : str , _UpperCAmelCase : str ) -> Optional[int]: """simple docstring""" __lowercase = [] __lowercase = '' __lowercase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCAmelCase ) + token __lowercase = True __lowercase = [] else: current_sub_tokens.append(_UpperCAmelCase ) __lowercase = False out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def a__ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self : Any , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def a__ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __lowercase = [self.sep_token_id] __lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] 
def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase , 'wb' ) as fi: __lowercase = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,)
325
import logging import os from .state import PartialState class A__ ( logging.LoggerAdapter ): @staticmethod def a__ ( _UpperCAmelCase : str ) -> Optional[Any]: """simple docstring""" __lowercase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) __lowercase = kwargs.pop('main_process_only' , _UpperCAmelCase ) __lowercase = kwargs.pop('in_order' , _UpperCAmelCase ) if self.isEnabledFor(_UpperCAmelCase ): if self._should_log(_UpperCAmelCase ): __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase ) self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) elif in_order: __lowercase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase ) self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) state.wait_for_everyone() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ) -> Optional[Any]: if log_level is None: __lowercase = os.environ.get('ACCELERATE_LOG_LEVEL' , SCREAMING_SNAKE_CASE ) __lowercase = logging.getLogger(SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
325
1
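The `MultiProcessAdapter` above is normally obtained through accelerate's `get_logger` helper, and the code raises a RuntimeError unless an `Accelerator` or `PartialState` has been created first. A short usage sketch under that assumption:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared PartialState the adapter requires
logger = get_logger(__name__, log_level="INFO")

logger.info("emitted on the main process only (the default)")
logger.info("emitted on every process", main_process_only=False)
logger.info("emitted once per process, in rank order", main_process_only=False, in_order=True)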
import argparse import os import re import packaging.version SCREAMING_SNAKE_CASE__ = """examples/""" SCREAMING_SNAKE_CASE__ = { """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } SCREAMING_SNAKE_CASE__ = { """init""": """src/diffusers/__init__.py""", """setup""": """setup.py""", } SCREAMING_SNAKE_CASE__ = """README.md""" def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.read() __lowercase , __lowercase = REPLACE_PATTERNS[pattern] __lowercase = replace.replace('VERSION' , SCREAMING_SNAKE_CASE ) __lowercase = re_pattern.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> str: for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , pattern='examples' ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Tuple: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not patch: update_version_in_examples(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( ) -> int: __lowercase = '🤗 Transformers currently provides the following architectures' __lowercase = '1. Want to contribute a new model?' with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.readlines() # Find the start of the list. __lowercase = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __lowercase = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): __lowercase = lines[index].replace( 'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , ) index += 1 with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( ) -> int: with open(REPLACE_FILES['init'] , 'r' ) as f: __lowercase = f.read() __lowercase = REPLACE_PATTERNS['init'][0].search(SCREAMING_SNAKE_CASE ).groups()[0] return packaging.version.parse(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str]=False ) -> Optional[Any]: __lowercase = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' 
) if default_version.is_devrelease: __lowercase = default_version.base_version elif patch: __lowercase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: __lowercase = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. __lowercase = input(F"""Which version are you releasing? [{default_version}]""" ) if len(SCREAMING_SNAKE_CASE ) == 0: __lowercase = default_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE , patch=SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = get_version() __lowercase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" __lowercase = current_version.base_version # Check with the user we got that right. __lowercase = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(SCREAMING_SNAKE_CASE ) == 0: __lowercase = dev_version print(F"""Updating version to {version}.""" ) global_version_update(SCREAMING_SNAKE_CASE ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") SCREAMING_SNAKE_CASE__ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
325
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: __lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowercase = [3, 3, 3, 3] __lowercase = [5, 5, 5, 5] elif "fl4" in model_name: __lowercase = [4, 4, 4, 4] __lowercase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowercase = [3, 3, 3, 3] if "lrf" in model_name: __lowercase = [3, 3, 3, 3] else: __lowercase = [2, 2, 2, 2] if "tiny" in model_name: __lowercase = 96 elif "small" in model_name: __lowercase = 96 elif "base" in model_name: __lowercase = 128 elif "large" in model_name: __lowercase = 192 elif "xlarge" in model_name: __lowercase = 256 elif "huge" in model_name: __lowercase = 352 # set label information __lowercase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowercase = 'imagenet-22k-id2label.json' else: __lowercase = 'imagenet-1k-id2label.json' __lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = {v: k for k, v in idalabel.items()} __lowercase = FocalNetConfig( embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , ) return config def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict: if "patch_embed.proj" in name: __lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowercase = 'encoder.' + name if "encoder.layers" in name: __lowercase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowercase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowercase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowercase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowercase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowercase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowercase = 'layernorm.weight' if name == "norm.bias": __lowercase = 'layernorm.bias' if "head" in name: __lowercase = name.replace('head' , 'classifier' ) else: __lowercase = 'focalnet.' 
+ name return name def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]: # fmt: off __lowercase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowercase = model_name_to_url[model_name] print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE ) __lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowercase = state_dict.pop(SCREAMING_SNAKE_CASE ) __lowercase = val __lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE ) __lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE ) model.eval() # load state dict model.load_state_dict(SCREAMING_SNAKE_CASE ) # verify conversion __lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowercase = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , ) __lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) __lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ) __lowercase = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 ) __lowercase = model(**SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] ) elif model_name == "focalnet-tiny-lrf": __lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] ) elif model_name == "focalnet-small": __lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] ) elif model_name == "focalnet-small-lrf": __lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] ) elif model_name == "focalnet-base": 
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] ) elif model_name == "focalnet-base-lrf": __lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""focalnet-tiny""", type=str, help="""Name of the FocalNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub.""", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
325
1
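The release script above derives patch, minor, and dev versions from the current one via `packaging.version`. The arithmetic is easy to check in isolation; the version strings below are purely illustrative:

from packaging.version import parse

current = parse("0.17.1")

patch_bump = f"{current.major}.{current.minor}.{current.micro + 1}"  # '0.17.2'
minor_bump = f"{current.major}.{current.minor + 1}.0"                # '0.18.0'
next_dev = f"{current.major}.{current.minor + 1}.0.dev0"             # '0.18.0.dev0'

print(patch_bump, minor_bump, next_dev)
print(parse(next_dev).is_devrelease)  # True, the property the script branches on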
from __future__ import annotations def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> bool: __lowercase = get_failure_array(SCREAMING_SNAKE_CASE ) # 2) Step through text searching for pattern __lowercase , __lowercase = 0, 0 # index into text, pattern while i < len(SCREAMING_SNAKE_CASE ): if pattern[j] == text[i]: if j == (len(SCREAMING_SNAKE_CASE ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: __lowercase = failure[j - 1] continue i += 1 return False def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> list[int]: __lowercase = [0] __lowercase = 0 __lowercase = 1 while j < len(SCREAMING_SNAKE_CASE ): if pattern[i] == pattern[j]: i += 1 elif i > 0: __lowercase = failure[i - 1] continue j += 1 failure.append(SCREAMING_SNAKE_CASE ) return failure if __name__ == "__main__": # Test 1) SCREAMING_SNAKE_CASE__ = """abc1abc12""" SCREAMING_SNAKE_CASE__ = """alskfjaldsabc1abc1abc12k23adsfabcabc""" SCREAMING_SNAKE_CASE__ = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) SCREAMING_SNAKE_CASE__ = """ABABX""" SCREAMING_SNAKE_CASE__ = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) SCREAMING_SNAKE_CASE__ = """AAAB""" SCREAMING_SNAKE_CASE__ = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) SCREAMING_SNAKE_CASE__ = """abcdabcy""" SCREAMING_SNAKE_CASE__ = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) SCREAMING_SNAKE_CASE__ = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
325
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "mask2former" lowerCAmelCase__ : List[Any] = ["swin"] lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"} def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowercase = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = backbone_config.pop('model_type' ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a__ ( self : str ) -> Dict[str, any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
325
1
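The KMP implementation above builds a failure array where failure[j] is the length of the longest proper prefix of pattern[:j+1] that is also a suffix. A compact standalone reimplementation, checked against the same test vector the row uses:

def failure_array(pattern: str) -> list[int]:
    failure = [0] * len(pattern)
    k = 0  # length of the currently matched border
    for j in range(1, len(pattern)):
        while k > 0 and pattern[k] != pattern[j]:
            k = failure[k - 1]  # fall back to the next shortest border
        if pattern[k] == pattern[j]:
            k += 1
        failure[j] = k
    return failure

assert failure_array("aabaabaaa") == [0, 1, 0, 1, 2, 3, 4, 5, 2]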
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class A__ ( lowerCAmelCase__ ): def a__ ( self : Dict ) -> Dict: """simple docstring""" __lowercase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'tf_padding' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'depth_multiplier' ) ) class A__ : def __init__( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int=13 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Optional[int]=0.25 , _UpperCAmelCase : Tuple=8 , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=10_24 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Optional[int]="relu6" , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : int=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Union[str, Any]=None , ) -> str: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = num_channels __lowercase = image_size __lowercase = depth_multiplier __lowercase = min_depth __lowercase = tf_padding __lowercase = int(last_hidden_size * depth_multiplier ) __lowercase = output_stride __lowercase = hidden_act __lowercase = classifier_dropout_prob __lowercase = use_labels __lowercase = is_training __lowercase = num_labels __lowercase = initializer_range __lowercase = scope def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels, pixel_labels def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def a__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> List[str]: """simple docstring""" __lowercase = MobileNetVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def a__ ( self : List[Any] , _UpperCAmelCase : Tuple , 
_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ) -> str: """simple docstring""" __lowercase = self.num_labels __lowercase = MobileNetVaForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () lowerCAmelCase__ : List[str] = ( {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : Any = False def a__ ( self : Dict ) -> List[str]: """simple docstring""" __lowercase = MobileNetVaModelTester(self ) __lowercase = MobileNetVaConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def a__ ( self : int ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV1 does not use inputs_embeds' ) def a__ ( self : Any ) -> Tuple: """simple docstring""" pass @unittest.skip(reason='MobileNetV1 does not support input and output embeddings' ) def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip(reason='MobileNetV1 does not output attentions' ) def a__ ( self : Optional[int] ) -> Any: """simple docstring""" pass def a__ ( self : Optional[int] ) -> Any: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_UpperCAmelCase ) __lowercase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ): __lowercase = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): __lowercase = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = outputs.hidden_states __lowercase = 26 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def a__ ( self : Union[str, Any] ) -> Optional[int]: 
"""simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def a__ ( self : Optional[int] ) -> str: """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = MobileNetVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ) -> List[Any]: __lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def a__ ( self : Optional[int] ) -> Any: """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None ) @slow def a__ ( self : Tuple ) -> Dict: """simple docstring""" __lowercase = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(_UpperCAmelCase ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __lowercase = model(**_UpperCAmelCase ) # verify the logits __lowercase = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __lowercase = torch.tensor([-4.1_739, -1.1_233, 3.1_205] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
325
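A quick arithmetic check of the shape assertion in the MobileNetV1 model tester above, reading the defaults off its obfuscated signature (image_size=32, depth_multiplier=0.25, last_hidden_size=1024, output_stride=32 — my reading of the parameter order, so treat it as an assumption):

# Hidden width is scaled by the depth multiplier; spatial size by the stride.
image_size, output_stride = 32, 32
last_hidden_size = int(1024 * 0.25)
assert (last_hidden_size, image_size // output_stride) == (256, 1)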
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


# NOTE: local variable names below were stripped in the source record and are
# restored from their own call sites within this file.
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
325
1
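A hypothetical Python driver for the converter above; the tokenizer name, checkpoint and dump path are illustrative values, and the module path is an assumption based on the transformers source layout:

from transformers.convert_slow_tokenizers_checkpoints_to_fast import (  # assumed module path
    convert_slow_checkpoint_to_fast,
)

convert_slow_checkpoint_to_fast(
    tokenizer_name="BertTokenizer",       # must be a key of TOKENIZER_CLASSES
    checkpoint_name="bert-base-uncased",  # one canonical checkpoint (illustrative)
    dump_path="./fast_tokenizers",        # where the tokenizer.json files land
    force_download=False,
)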
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A__ : lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowerCAmelCase__ )} , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class A__ : lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) lowerCAmelCase__ : bool = field( default=lowerCAmelCase__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) lowerCAmelCase__ : bool = field( default=lowerCAmelCase__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) lowerCAmelCase__ : bool = field(default=lowerCAmelCase__ , metadata={"help": "Whether ot not to use whole word mask."} ) lowerCAmelCase__ : float = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) lowerCAmelCase__ : float = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." 
) } , ) lowerCAmelCase__ : int = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) lowerCAmelCase__ : int = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." ) } , ) lowerCAmelCase__ : bool = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : DataTrainingArguments , SCREAMING_SNAKE_CASE : PreTrainedTokenizer , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[str] = None , ) -> List[str]: def _dataset(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE , ) return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size ) else: return TextDataset( tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def __SCREAMING_SNAKE_CASE ( ) -> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: __lowercase = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: __lowercase = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: __lowercase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: __lowercase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: __lowercase = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) __lowercase = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' 
) if data_args.block_size <= 0: __lowercase = tokenizer.max_len # Our input block size will be the max possible for the model else: __lowercase = min(data_args.block_size , tokenizer.max_len ) # Get datasets __lowercase = ( get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) __lowercase = ( get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , evaluate=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": __lowercase = DataCollatorForPermutationLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: __lowercase = DataCollatorForWholeWordMask( tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability ) else: __lowercase = DataCollatorForLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer __lowercase = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: __lowercase = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=SCREAMING_SNAKE_CASE ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase = trainer.evaluate() __lowercase = math.exp(eval_output['eval_loss'] ) __lowercase = {'perplexity': perplexity} __lowercase = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(SCREAMING_SNAKE_CASE ) return results def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
325
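The evaluation step in the training script above reports perplexity as exp of the mean eval loss; a quick numeric illustration with a made-up loss value:

import math

eval_loss = 3.0  # illustrative mean cross-entropy in nats
perplexity = math.exp(eval_loss)
assert round(perplexity, 1) == 20.1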
from math import isqrt, log2  # `loga` in the source record is a mangled math.log2


# NOTE: function and variable names below are restored from the record's own
# call sites (`calculate_prime_numbers`, `solution`); `base`/`degree` follow the
# conventional Project Euler signature and are an assumption.
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return every prime below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Count pairs of primes p < q with p**q * q**p <= base**degree,
    comparing everything in log2 space to avoid huge integers.
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
325
1
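A tiny brute-force cross-check of the log2 pair-counting used by `solution()` above, on the small bound 2**10 (an illustrative stand-in for 800800**800800):

from math import log2

primes = [2, 3, 5, 7, 11, 13]
upper_bound = log2(2**10)
count = sum(
    1
    for i, p in enumerate(primes)
    for q in primes[i + 1 :]
    if q * log2(p) + p * log2(q) <= upper_bound  # i.e. p**q * q**p <= 2**10
)
assert count == 2  # exactly the pairs (2, 3) and (2, 5)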
# NOTE: the source record named this class `A__` even though its own test
# helpers call `TrieNode`, and all four methods shared one mangled name; the
# names below are restored from those call sites (`pytests` comes from the
# upstream file and is an assumption).
class TrieNode:
    def __init__(self) -> None:
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
325
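A short usage sketch of the trie record above (assumes its `TrieNode` class is in scope); deleting a word must not disturb words that share its prefix:

root = TrieNode()
root.insert_many(["car", "cart", "care"])
assert root.find("cart")
assert not root.find("ca")  # a bare prefix is not a stored word
root.delete("cart")
assert not root.find("cart")
assert root.find("car") and root.find("care")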
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
325
1
# NOTE: the record gave this function a mangled name and duplicate parameter
# names (a SyntaxError); `string`/`separator` are restored from the body's own
# references, while `split` and the doctests come from the upstream module.
def split(string: str, separator: str = " ") -> list:
    """
    Will split the string up into all the values separated by the separator
    (defaults to spaces).

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    >>> split("11/22/63", separator="/")
    ['11', '22', '63']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
325
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "layoutlmv3" def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) __lowercase = max_ad_position_embeddings __lowercase = coordinate_size __lowercase = shape_size __lowercase = has_relative_attention_bias __lowercase = rel_pos_bins __lowercase = max_rel_pos __lowercase = has_spatial_attention_bias __lowercase = rel_ad_pos_bins __lowercase = max_rel_ad_pos __lowercase = text_embed __lowercase = visual_embed __lowercase = input_size __lowercase = num_channels __lowercase = patch_size __lowercase = classifier_dropout class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = version.parse("1.12" ) @property def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def a__ ( self : int ) -> float: """simple docstring""" 
return 1e-5 @property def a__ ( self : str ) -> int: """simple docstring""" return 12 def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowercase = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = dict( processor( _UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) ) return inputs
325
1
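A standalone restatement of the dynamic-axes mapping the ONNX config class above declares for the question-answering / sequence-classification tasks (values copied from the record; this plain dict is only for inspection, not a transformers API):

from collections import OrderedDict

layoutlmv3_onnx_inputs = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
        ("bbox", {0: "batch", 1: "sequence"}),
        ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
    ]
)
# axis 0 is always the batch; sequence/image axes stay dynamic in the export
assert all(axes[0] == "batch" for axes in layoutlmv3_onnx_inputs.values())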
import unittest

from knapsack import knapsack as k


# NOTE: the record stripped the class and test-method names (unittest would
# discover nothing, and the duplicate names shadowed each other); the `test_*`
# names and the (capacity, weights, values, counter) argument order are
# restored from the upstream test file and should be treated as assumptions.
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
325
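The tests above import a `knapsack` module that is not part of the record. A minimal recursive 0/1 knapsack consistent with the expected results, assuming the (capacity, weights, values, counter) argument order:

def knapsack(capacity, weights, values, counter):
    """Best total value using the first `counter` items within `capacity`."""
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:  # item cannot fit: skip it
        return knapsack(capacity, weights, values, counter - 1)
    return max(  # take the item, or leave it
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )


assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220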
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
325
1
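A standalone toy of the residual pattern the RegNet layers above implement — block output plus a 1x1-projected shortcut, then the activation. Channel sizes are illustrative; these are plain torch modules, not the transformers classes:

import torch
from torch import nn

hidden = torch.randn(1, 8, 16, 16)
layer = nn.Sequential(nn.Conv2d(8, 16, 1), nn.BatchNorm2d(16), nn.ReLU())
shortcut = nn.Conv2d(8, 16, 1)  # plays the role of RegNetShortCut
out = torch.relu(layer(hidden) + shortcut(hidden))
assert out.shape == (1, 16, 16, 16)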
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


# NOTE: class, method and parameter names below were stripped in the source
# record (the duplicate parameter names were a SyntaxError); they are restored
# from the upstream FNet fast-tokenizer module, as are the AddedToken flags.
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
325
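The special-token layout produced by the FNet tokenizer above, spelled out with illustrative ids (101/102 are placeholders, not FNet's real ids):

cls_id, sep_id = 101, 102
a, b = [7, 8], [9]
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]        # [CLS] A [SEP] B [SEP]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)  # 0s over [CLS] A [SEP]
assert len(input_ids) == len(token_type_ids) == 6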
from __future__ import annotations


# NOTE: the record stripped the function name; `min_path_sum` is an assumed
# restoration, while the `matrix` parameter comes from the body's own references.
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Minimum cost of a monotone (right/down) path from the top-left to the
    bottom-right corner; the matrix is updated in place.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
325
1
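A worked trace of the in-place DP above on a second small grid (my own illustration):

grid = [[1, 2, 3], [4, 5, 6]]
for i in range(1, 3):
    grid[0][i] += grid[0][i - 1]      # first-row prefix sums -> [1, 3, 6]
grid[1][0] += grid[0][0]              # first-column prefix sum -> 5
for j in range(1, 3):
    grid[1][j] += min(grid[0][j], grid[1][j - 1])  # (1,1): 5+3=8; (1,2): 6+6=12
assert grid[-1][-1] == 12  # cheapest path is 1 -> 2 -> 3 -> 6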
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( lowerCAmelCase__ ): def a__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowercase = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'embed_dim' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_heads' ) ) class A__ : def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Dict=64 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : int=[16, 48, 96] , _UpperCAmelCase : Dict=[1, 3, 6] , _UpperCAmelCase : List[Any]=[1, 2, 10] , _UpperCAmelCase : str=[7, 3, 3] , _UpperCAmelCase : List[str]=[4, 2, 2] , _UpperCAmelCase : List[Any]=[2, 1, 1] , _UpperCAmelCase : Union[str, Any]=[2, 2, 2] , _UpperCAmelCase : Tuple=[False, False, True] , _UpperCAmelCase : str=[0.0, 0.0, 0.0] , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[Any]=1e-1_2 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Union[str, Any]=2 , ) -> str: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = image_size __lowercase = patch_sizes __lowercase = patch_stride __lowercase = patch_padding __lowercase = is_training __lowercase = use_labels __lowercase = num_labels __lowercase = num_channels __lowercase = embed_dim __lowercase = num_heads __lowercase = stride_kv __lowercase = depth __lowercase = cls_token __lowercase = attention_drop_rate __lowercase = initializer_range __lowercase = layer_norm_eps def a__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase = None if self.use_labels: # create a random int32 tensor of given shape __lowercase = ids_tensor([self.batch_size] , self.num_labels ) __lowercase = self.get_config() return config, pixel_values, labels def a__ ( self : List[str] ) -> str: """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def a__ ( self : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = TFCvtModel(config=_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase , training=_UpperCAmelCase ) __lowercase = (self.image_size, self.image_size) __lowercase , __lowercase = image_size[0], 
image_size[1] for i in range(len(self.depth ) ): __lowercase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowercase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def a__ ( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFCvtForImageClassification(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () lowerCAmelCase__ : List[Any] = ( {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification} if is_tf_available() else {} ) lowerCAmelCase__ : List[Any] = False lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : Optional[int] = False lowerCAmelCase__ : Optional[int] = False lowerCAmelCase__ : Optional[int] = False def a__ ( self : Any ) -> Any: """simple docstring""" __lowercase = TFCvtModelTester(self ) __lowercase = TFCvtConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Tuple ) -> int: """simple docstring""" self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def a__ ( self : List[str] ) -> Tuple: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' 
, ) @slow def a__ ( self : Dict ) -> int: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(_UpperCAmelCase ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def a__ ( self : Any ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = model_class(_UpperCAmelCase ) __lowercase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase = [*signature.parameters.keys()] __lowercase = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def a__ ( self : str ) -> str: """simple docstring""" def check_hidden_states_output(_UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ): __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = outputs.hidden_states __lowercase = len(self.model_tester.depth ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def a__ ( self : int ) -> int: """simple docstring""" for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = TFCvtModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __SCREAMING_SNAKE_CASE ( ) -> Any: __lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A__ ( unittest.TestCase ): @cached_property def a__ ( self : Dict ) -> List[str]: """simple docstring""" return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def a__ ( self : List[Any] ) -> str: """simple docstring""" __lowercase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __lowercase = self.default_image_processor __lowercase = prepare_img() __lowercase = image_processor(images=_UpperCAmelCase , return_tensors='tf' ) # forward pass __lowercase = model(**_UpperCAmelCase ) # verify the logits __lowercase = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __lowercase = tf.constant([0.9_285, 
0.9_015, -0.3_150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1e-4 ) )
325
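The per-stage shapes asserted in the Cvt model test above follow the standard convolution output-size formula. A minimal standalone sketch using the tester's default image size, patch sizes, strides, and paddings (the helper name is illustrative, not part of the record):

from math import floor


def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    # floor((size + 2 * padding - kernel) / stride) + 1, evaluated once per stage
    return floor((size + 2 * padding - kernel) / stride) + 1


size = 64  # tester default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = conv_output_size(size, kernel, stride, padding)
print(size)  # 4: the last hidden state ends up (batch, embed_dim[-1]=96, 4, 4)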
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # stream the file in 1 MiB chunks so large files never load fully into memory
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
325
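A quick usage sketch of the checksum verification helper above; the URL and hash values are made up for illustration:

expected = {"https://example.com/train.txt": {"num_bytes": 3, "checksum": "aaa"}}
recorded = {"https://example.com/train.txt": {"num_bytes": 3, "checksum": "aaa"}}

# identical dicts: logs "All the checksums matched successfully for train files"
verify_checksums(expected, recorded, verification_name="train files")

recorded_bad = {"https://example.com/train.txt": {"num_bytes": 3, "checksum": "bbb"}}
# verify_checksums(expected, recorded_bad)  # raises NonMatchingChecksumError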
1
from pathlib import Path

import fire
from tqdm import tqdm


def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")


if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
325
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
325
1
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
325
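The tokenizer test above builds its vocabulary by prepending four special tokens to every piece of the SentencePiece fixture model. A sketch of that construction using the public sentencepiece API (the path argument is a placeholder):

import sentencepiece as spm


def build_vocab(spm_model_path: str) -> dict:
    sp = spm.SentencePieceProcessor()
    sp.Load(spm_model_path)
    tokens = ["<s>", "<pad>", "</s>", "<unk>"]
    tokens += [sp.IdToPiece(piece_id) for piece_id in range(sp.GetPieceSize())]
    # pieces that repeat the specials (e.g. the model's own <unk>, <s>, </s>) collapse
    # in the dict, which is how a 1000-piece fixture yields the 1001-entry vocab the test asserts
    return {token: index for index, token in enumerate(tokens)}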
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = 
processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 
SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
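The SAM processor tests above expect low-resolution mask logits to come back at the original image resolution. A simplified sketch of the upscaling idea only; the real post_process_masks also passes through the intermediate padded resolution and crops before this final resize:

import torch
import torch.nn.functional as F


def upscale_masks(masks: torch.Tensor, original_size) -> torch.Tensor:
    # (batch, num_masks, h, w) logits -> original image resolution
    return F.interpolate(masks, size=tuple(original_size), mode="bilinear", align_corners=False)


masks = torch.ones(1, 3, 5, 5)
print(upscale_masks(masks, (1764, 2646)).shape)  # torch.Size([1, 3, 1764, 2646])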
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Any = ["pixel_values"] def __init__( self : Tuple , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 2_55 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : bool = True , **_UpperCAmelCase : int , ) -> None: """simple docstring""" super().__init__(**_UpperCAmelCase ) __lowercase = size if size is not None else {'height': 3_84, 'width': 3_84} __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) __lowercase = do_resize __lowercase = size __lowercase = resample __lowercase = do_rescale __lowercase = rescale_factor __lowercase = do_normalize __lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase = do_convert_rgb def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : str , ) -> np.ndarray: """simple docstring""" __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) __lowercase = (size['height'], size['width']) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Tuple , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Optional[int] , ) -> str: """simple docstring""" return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ) -> np.ndarray: """simple docstring""" return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Dict[str, int]] = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[float] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Tuple , ) -> PIL.Image.Image: """simple docstring""" __lowercase = do_resize if do_resize is not None else self.do_resize __lowercase = resample if resample is not None else self.resample __lowercase = do_rescale if do_rescale is not None else self.do_rescale __lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase = do_normalize if do_normalize is not None else self.do_normalize __lowercase = image_mean if image_mean is not None else self.image_mean __lowercase = image_std if image_std is not None else self.image_std __lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase = size if size is not None else self.size __lowercase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) __lowercase = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase = [convert_to_rgb(_UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
__lowercase = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: __lowercase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: __lowercase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: __lowercase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] __lowercase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] __lowercase = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase ) return encoded_outputs
325
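The image processor above chains rescale and channel-wise normalization as plain numpy transforms. A minimal sketch of those two steps on a (C, H, W) array; the mean/std values here are placeholders for the OPENAI_CLIP_MEAN/OPENAI_CLIP_STD constants the record defaults to:

import numpy as np


def rescale(image: np.ndarray, scale: float = 1 / 255) -> np.ndarray:
    # map uint8 pixel values into [0, 1]
    return image.astype(np.float32) * scale


def normalize(image: np.ndarray, mean, std) -> np.ndarray:
    # channel-wise (x - mean) / std, broadcasting over height and width
    mean = np.asarray(mean, dtype=np.float32).reshape(-1, 1, 1)
    std = np.asarray(std, dtype=np.float32).reshape(-1, 1, 1)
    return (image - mean) / std


image = np.random.randint(0, 256, size=(3, 384, 384), dtype=np.uint8)
pixel_values = normalize(rescale(image), mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])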
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
325
1
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class A__ : lowerCAmelCase__ : Dict = BlenderbotSmallConfig lowerCAmelCase__ : List[Any] = {} lowerCAmelCase__ : int = "gelu" def __init__( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str=13 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : int=99 , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Any=20 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Optional[Any]=1 , _UpperCAmelCase : Union[str, Any]=0 , ) -> int: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = eos_token_id __lowercase = pad_token_id __lowercase = bos_token_id def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowercase = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __lowercase = prepare_blenderbot_small_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def a__ ( self : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" __lowercase = TFBlenderbotSmallModel(config=_UpperCAmelCase ).get_decoder() __lowercase = inputs_dict['input_ids'] __lowercase = input_ids[:1, :] __lowercase = inputs_dict['attention_mask'][:1, :] __lowercase = inputs_dict['head_mask'] __lowercase = 1 # first forward pass __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) __lowercase , __lowercase = outputs.to_tuple() # create hypothetical next token and extent to 
next_input_ids __lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowercase = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowercase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __lowercase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowercase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowercase = output_from_no_past[:, -3:, random_slice_idx] __lowercase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[Any]=None , ) -> Tuple: if attention_mask is None: __lowercase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __lowercase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Any = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) lowerCAmelCase__ : int = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ : int = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ : int = True lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Any = False def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __lowercase = TFBlenderbotSmallModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class A__ ( unittest.TestCase ): lowerCAmelCase__ : List[str] = [ "Social anxiety\nWow, 
I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" ] lowerCAmelCase__ : Dict = "facebook/blenderbot_small-90M" @cached_property def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.tokenizer(self.src_text , return_tensors='tf' ) __lowercase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_UpperCAmelCase , ) __lowercase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
325
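prepare_blenderbot_small_inputs_dict above derives attention masks by marking every non-pad position. A short sketch of that pattern (int32 is chosen here for illustration; the record casts to an 8-bit integer type):

import tensorflow as tf


def attention_mask_from_ids(input_ids: tf.Tensor, pad_token_id: int) -> tf.Tensor:
    # 1 where the token is real, 0 where it is padding
    return tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int32)


ids = tf.constant([[5, 7, 2, 0, 0]])
print(attention_mask_from_ids(ids, pad_token_id=0))  # [[1 1 1 0 0]]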
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = "transfo-xl" lowerCAmelCase__ : int = ["mems"] lowerCAmelCase__ : Dict = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple: """simple docstring""" __lowercase = vocab_size __lowercase = [] self.cutoffs.extend(_UpperCAmelCase ) if proj_share_all_but_first: __lowercase = [False] + [True] * len(self.cutoffs ) else: __lowercase = [False] + [False] * len(self.cutoffs ) __lowercase = d_model __lowercase = d_embed __lowercase = d_head __lowercase = d_inner __lowercase = div_val __lowercase = pre_lnorm __lowercase = n_layer __lowercase = n_head __lowercase = mem_len __lowercase = same_length __lowercase = attn_type __lowercase = clamp_len __lowercase = sample_softmax __lowercase = adaptive __lowercase = dropout __lowercase = dropatt __lowercase = untie_r __lowercase = init __lowercase = init_range __lowercase = proj_init_std __lowercase = init_std __lowercase = layer_norm_epsilon super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def a__ ( self : Tuple ) -> Any: """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
325
1
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "mask2former" lowerCAmelCase__ : List[Any] = ["swin"] lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"} def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowercase = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = backbone_config.pop('model_type' ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a__ ( self : str ) -> Dict[str, any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
325
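Mask2FormerConfig.to_dict above serializes a nested backbone config next to the flat attributes. A generic, self-contained sketch of that pattern; all class and attribute names are illustrative:

import copy


class BackboneConfig:
    def to_dict(self):
        return {"model_type": "swin", "depths": [2, 2, 18, 2]}


class CompositeConfig:
    model_type = "mask2former"

    def __init__(self):
        self.backbone_config = BackboneConfig()
        self.hidden_dim = 256

    def to_dict(self):
        # deep-copy the flat fields, then expand the nested config into plain data
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


print(CompositeConfig().to_dict())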
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: for attribute in key.split('.' ): __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __lowercase = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase = value elif weight_type == "weight_g": __lowercase = value elif weight_type == "weight_v": __lowercase = value elif weight_type == "bias": __lowercase = value else: __lowercase = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __lowercase = [] __lowercase = fairseq_model.state_dict() __lowercase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __lowercase = None for name, value in fairseq_dict.items(): __lowercase = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __lowercase = True elif name.split('.' )[0] == "proj": __lowercase = fairseq_model.proj __lowercase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowercase = True if "*" in mapped_key: __lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __lowercase = 'weight_g' elif "weight_v" in name: __lowercase = 'weight_v' elif "bias" in name: __lowercase = 'bias' elif "weight" in name: __lowercase = 'weight' else: __lowercase = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: __lowercase = full_name.split('conv_layers.' )[-1] __lowercase = name.split('.' ) __lowercase = int(items[0] ) __lowercase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) __lowercase = emb.weight.data return lin_layer def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __lowercase = f.readlines() __lowercase = [line.split(' ' )[0] for line in lines] __lowercase = len(SCREAMING_SNAKE_CASE ) __lowercase = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]: __lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaConfig.from_pretrained( SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE ) __lowercase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __lowercase = model[0].eval() # set weights for wav2vec2 encoder __lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE ) __lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE ) __lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __lowercase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) __lowercase = False # add projection layer __lowercase = nn.Parameter(projection_layer.weight ) __lowercase = nn.Parameter(projection_layer.bias ) __lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE ) with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) __lowercase 
= hf_wavavec.config.to_dict() __lowercase = tokenizer.pad_token_id __lowercase = tokenizer.bos_token_id __lowercase = tokenizer.eos_token_id __lowercase = 'speech_to_text_2' __lowercase = 'wav2vec2' __lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
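# A minimal invocation sketch for the converter above; the script and file
# names are placeholders, and the flags mirror the argparse block:
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path ./wav2vec2_seq2seq.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./hf_speech_encoder_decoder
#
# For reference, create_vocab_dict reads a fairseq dictionary with one
# "<token> <count>" pair per line and appends tokens after the four specials,
# so a file containing "a 100\nb 50\n" yields
# {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'a': 4, 'b': 5}.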
325
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = AutoencoderKL lowerCAmelCase__ : Dict = "sample" lowerCAmelCase__ : Tuple = 1e-2 @property def a__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowercase = 4 __lowercase = 3 __lowercase = (32, 32) __lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) return {"sample": image} @property def a__ ( self : Dict ) -> int: """simple docstring""" return (3, 32, 32) @property def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" return (3, 32, 32) def a__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __lowercase = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } __lowercase = self.dummy_input return init_dict, inputs_dict def a__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass def a__ ( self : Optional[int] ) -> str: """simple docstring""" pass @unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' ) def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common() __lowercase = self.model_class(**_UpperCAmelCase ) model.to(_UpperCAmelCase ) assert not model.is_gradient_checkpointing and model.training __lowercase = model(**_UpperCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __lowercase = torch.randn_like(_UpperCAmelCase ) __lowercase = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __lowercase = self.model_class(**_UpperCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_UpperCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __lowercase = model_a(**_UpperCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __lowercase = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) __lowercase = dict(model.named_parameters() ) __lowercase = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" __lowercase , __lowercase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(_UpperCAmelCase ) __lowercase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def a__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __lowercase = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' ) __lowercase = model.to(_UpperCAmelCase ) model.eval() if torch_device == "mps": __lowercase = torch.manual_seed(0 ) else: __lowercase = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __lowercase = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __lowercase = image.to(_UpperCAmelCase ) with torch.no_grad(): __lowercase = model(_UpperCAmelCase , sample_posterior=_UpperCAmelCase , generator=_UpperCAmelCase ).sample __lowercase = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __lowercase = torch.tensor( [ -4.0_0_7_8e-0_1, -3.8_3_2_3e-0_4, -1.2_6_8_1e-0_1, -1.1_4_6_2e-0_1, 2.0_0_9_5e-0_1, 1.0_8_9_3e-0_1, -8.8_2_4_7e-0_2, -3.0_3_6_1e-0_1, -9.8_6_4_4e-0_3, ] ) elif torch_device == "cpu": __lowercase = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __lowercase = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-2 ) ) @slow class A__ ( unittest.TestCase ): def a__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" return f"""gaussian_noise_s={seed}_shape={"_".join([str(_UpperCAmelCase ) for s in shape] )}.npy""" def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : List[Any]=(4, 3, 5_12, 5_12) , _UpperCAmelCase : Optional[int]=False ) -> Optional[Any]: """simple docstring""" __lowercase = torch.floataa if fpaa else torch.floataa __lowercase = torch.from_numpy(load_hf_numpy(self.get_file_format(_UpperCAmelCase , _UpperCAmelCase ) ) ).to(_UpperCAmelCase ).to(_UpperCAmelCase ) return image def a__ ( self : Dict , _UpperCAmelCase : Tuple="CompVis/stable-diffusion-v1-4" , _UpperCAmelCase : Optional[int]=False ) -> List[str]: """simple docstring""" __lowercase = 'fp16' if fpaa else None __lowercase = torch.floataa if fpaa else torch.floataa __lowercase = AutoencoderKL.from_pretrained( _UpperCAmelCase , subfolder='vae' , torch_dtype=_UpperCAmelCase , revision=_UpperCAmelCase , ) model.to(_UpperCAmelCase ).eval() return model def a__ ( self : str , _UpperCAmelCase : int=0 ) -> Tuple: """simple docstring""" if torch_device == "mps": return torch.manual_seed(_UpperCAmelCase ) return torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def a__ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> str: """simple docstring""" __lowercase = self.get_sd_vae_model() __lowercase = self.get_sd_image(_UpperCAmelCase ) __lowercase = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): __lowercase = model(_UpperCAmelCase , generator=_UpperCAmelCase , sample_posterior=_UpperCAmelCase ).sample assert sample.shape == image.shape __lowercase = sample[-1, -2:, -2:, :2].flatten().float().cpu() __lowercase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def a__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) __lowercase = 
self.get_sd_image(_UpperCAmelCase , fpaa=_UpperCAmelCase ) __lowercase = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): __lowercase = model(_UpperCAmelCase , generator=_UpperCAmelCase , sample_posterior=_UpperCAmelCase ).sample assert sample.shape == image.shape __lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu() __lowercase = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def a__ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int: """simple docstring""" __lowercase = self.get_sd_vae_model() __lowercase = self.get_sd_image(_UpperCAmelCase ) with torch.no_grad(): __lowercase = model(_UpperCAmelCase ).sample assert sample.shape == image.shape __lowercase = sample[-1, -2:, -2:, :2].flatten().float().cpu() __lowercase = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def a__ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> Optional[Any]: """simple docstring""" __lowercase = self.get_sd_vae_model() __lowercase = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __lowercase = sample[-1, -2:, :2, -2:].flatten().cpu() __lowercase = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) __lowercase = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=_UpperCAmelCase ) with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __lowercase = sample[-1, -2:, :2, -2:].flatten().float().cpu() __lowercase = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' 
) def a__ ( self : List[Any] , _UpperCAmelCase : str ) -> str: """simple docstring""" __lowercase = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) __lowercase = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 64, 64) , fpaa=_UpperCAmelCase ) with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' ) def a__ ( self : List[Any] , _UpperCAmelCase : List[str] ) -> List[Any]: """simple docstring""" __lowercase = self.get_sd_vae_model() __lowercase = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __lowercase = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def a__ ( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> Tuple: """simple docstring""" __lowercase = self.get_sd_vae_model() __lowercase = self.get_sd_image(_UpperCAmelCase ) __lowercase = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): __lowercase = model.encode(_UpperCAmelCase ).latent_dist __lowercase = dist.sample(generator=_UpperCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __lowercase = sample[0, -1, -3:, -3:].flatten().cpu() __lowercase = torch.tensor(_UpperCAmelCase ) __lowercase = 3e-3 if torch_device != 'mps' else 1e-2 assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=_UpperCAmelCase )
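# A minimal sketch of the encode/decode round trip the tests above exercise,
# built from the same dummy config so nothing is downloaded. The latent shape
# comment is an assumption based on the single 2x downsample in this config.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
)
image = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    posterior = vae.encode(image).latent_dist                    # diagonal Gaussian
    latents = posterior.sample(generator=torch.manual_seed(0))   # (1, 4, 16, 16)
    recon = vae.decode(latents).sample                           # back to (1, 3, 32, 32)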
325
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
    __lowercase = [0 for i in range(r + 1 )]  # nc0 = 1
    __lowercase = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        __lowercase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
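# Quick sanity checks for the single-row Pascal DP above, using the readable
# name from the call at the bottom of the file. Iterating j downwards lets a
# single 1-D array stand in for the current row of Pascal's triangle.
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=6, r=3) == 20
assert binomial_coefficient(n=5, r=0) == 1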
325
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
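# A small sketch of what the lazy structure above buys: importing the package
# stays cheap, and the heavy backend modules are only imported when a name
# registered in _import_structure is first accessed through _LazyModule.
from transformers import LlamaConfig  # resolved lazily on first attribute access

config = LlamaConfig()    # default hyperparameters
print(config.model_type)  # "llama"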
325
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = ["vqvae"] def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase ) def a__ ( self : Tuple ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00 @torch.no_grad() def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" __lowercase = steps or self.get_default_steps() self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __lowercase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __lowercase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_UpperCAmelCase , device=self.device , ) __lowercase = noise __lowercase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase ) __lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) __lowercase = (input_image / 2_55) * 2 - 1 __lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample( generator=_UpperCAmelCase )[0] __lowercase = self.vqvae.config.scaling_factor * input_images if start_step > 0: __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] ) __lowercase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __lowercase = int(mask_start_secs * pixels_per_second ) __lowercase = int(mask_end_secs * pixels_per_second ) __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _UpperCAmelCase ): __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample'] else: __lowercase = self.unet(_UpperCAmelCase , 
_UpperCAmelCase )['sample'] if isinstance(self.scheduler , _UpperCAmelCase ): __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] else: __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] if mask is not None: if mask_start > 0: __lowercase = mask[:, step, :, :mask_start] if mask_end > 0: __lowercase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __lowercase = 1 / self.vqvae.config.scaling_factor * images __lowercase = self.vqvae.decode(_UpperCAmelCase )['sample'] __lowercase = (images / 2 + 0.5).clamp(0 , 1 ) __lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __lowercase = (images * 2_55).round().astype('uint8' ) __lowercase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) ) __lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) ) @torch.no_grad() def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler , _UpperCAmelCase ) self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) __lowercase = (sample / 2_55) * 2 - 1 __lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __lowercase = self.scheduler.alphas_cumprod[t] __lowercase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __lowercase = 1 - alpha_prod_t __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample'] __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output __lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor: """simple docstring""" __lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
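# A hedged usage sketch for the pipeline above. The repo id is an assumption
# (any checkpoint bundling a unet, a scheduler and a mel component works); the
# output carries both the spectrogram images and the reconstructed audio.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")  # assumed repo id
output = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
mel_image = output.images[0]    # PIL image of the generated mel spectrogram
waveform = output.audios[0, 0]  # audio reconstructed from the spectrogram
sample_rate = pipe.mel.get_sample_rate()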
325
1
import collections import importlib.util import os import re from pathlib import Path SCREAMING_SNAKE_CASE__ = """src/transformers""" # Matches is_xxx_available() SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""") # Catches a line with else: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""") def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None: return None __lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.readlines() __lowercase = 0 while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure __lowercase = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowercase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ): __lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0] __lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: __lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowercase = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __lowercase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowercase = [] while ( line_index < len(SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowercase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int: def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowercase = [] for key in import_dict_objects.keys(): __lowercase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __lowercase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowercase = 'base imports' if key == 'none' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ) -> Tuple: __lowercase = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) __lowercase = parse_init(SCREAMING_SNAKE_CASE ) if objects is not None: __lowercase = analyze_results(*SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('\n'.join(SCREAMING_SNAKE_CASE ) ) if len(SCREAMING_SNAKE_CASE ) > 0: raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0: continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace(os.path.sep , '.' ) submodules.append(SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' 
) if len(submodule.split('.' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE ) return submodules SCREAMING_SNAKE_CASE__ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def __SCREAMING_SNAKE_CASE ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. __lowercase = importlib.util.spec_from_file_location( 'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) __lowercase = spec.loader.load_module() __lowercase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F"""{list_of_modules}\n""" 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
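# The shape of an __init__.py that parse_init and analyze_results accept:
# both halves must declare the same names under the same backends. "foo",
# FooConfig and FooModel are placeholder names, not a real module.
from typing import TYPE_CHECKING

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {"configuration_foo": ["FooConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]

if TYPE_CHECKING:
    from .configuration_foo import FooConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_foo import FooModel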
325
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
SCREAMING_SNAKE_CASE__ = 10


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        if array[i] == target:
            return i
    return -1


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    __lowercase = 0
    __lowercase = len(SCREAMING_SNAKE_CASE )
    while left <= right:
        if right - left < precision:
            return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        __lowercase = (left + right) // 3 + 1
        __lowercase = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            __lowercase = one_third - 1
        elif array[two_third] < target:
            __lowercase = two_third + 1
        else:
            __lowercase = one_third + 1
            __lowercase = two_third - 1
    else:
        return -1


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

        __lowercase = (left + right) // 3 + 1
        __lowercase = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(SCREAMING_SNAKE_CASE , one_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    SCREAMING_SNAKE_CASE__ = input("""Enter numbers separated by comma:\n""").strip()
    SCREAMING_SNAKE_CASE__ = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    SCREAMING_SNAKE_CASE__ = int(input("""Enter the number to be found in the list:\n""").strip())
    SCREAMING_SNAKE_CASE__ = ite_ternary_search(collection, target)
    SCREAMING_SNAKE_CASE__ = rec_ternary_search(0, len(collection) - 1, collection, target)

    if resulta != -1:
        print(F'''Iterative search: {target} found at positions: {resulta}''')
        print(F'''Recursive search: {target} found at positions: {resulta}''')
    else:
        print("""Not found""")
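# A tiny worked example for the two searches above, using the names from the
# interactive block at the bottom of the file. Once fewer than `precision`
# elements remain between the bounds, both variants fall back to lin_search.
data = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23]
assert ite_ternary_search(data, 13) == 6
assert rec_ternary_search(0, len(data) - 1, data, 13) == 6
assert ite_ternary_search(data, 4) == -1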
325
1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ) -> Any:
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(SCREAMING_SNAKE_CASE ):
        for j in range(SCREAMING_SNAKE_CASE ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str ) -> List[Any]:
    __lowercase = [[float('inf' ) for _ in range(SCREAMING_SNAKE_CASE )] for _ in range(SCREAMING_SNAKE_CASE )]

    for i in range(SCREAMING_SNAKE_CASE ):
        for j in range(SCREAMING_SNAKE_CASE ):
            __lowercase = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(SCREAMING_SNAKE_CASE ):
        # looping through rows of graph array
        for i in range(SCREAMING_SNAKE_CASE ):
            # looping through columns of graph array
            for j in range(SCREAMING_SNAKE_CASE ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    __lowercase = dist[i][k] + dist[k][j]

    _print_dist(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
    return dist, v


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = int(input("""Enter number of vertices: """))
    SCREAMING_SNAKE_CASE__ = int(input("""Enter number of edges: """))

    SCREAMING_SNAKE_CASE__ = [[float("""inf""") for i in range(v)] for j in range(v)]

    for i in range(v):
        SCREAMING_SNAKE_CASE__ = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("""\nEdge """, i + 1)
        SCREAMING_SNAKE_CASE__ = int(input("""Enter source:"""))
        SCREAMING_SNAKE_CASE__ = int(input("""Enter destination:"""))
        SCREAMING_SNAKE_CASE__ = float(input("""Enter weight:"""))
        SCREAMING_SNAKE_CASE__ = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
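# A programmatic check mirroring the interactive example in the comments
# above (two directed edges on three vertices). Note that floyd_warshall also
# prints the shortest-path matrix as a side effect.
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
dist, _ = floyd_warshall(example_graph, 3)
assert dist[1][2] == 2.0 and dist[2][1] == 1.0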
325
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = module __lowercase = nn.Sequential( nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , ) __lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ : int = "bigscience/bloom-1b7" # Constant values lowerCAmelCase__ : Any = 2.109659552692574 lowerCAmelCase__ : str = "Hello my name is" lowerCAmelCase__ : Any = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) lowerCAmelCase__ : List[Any] = 10 def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" super().setUp() # Models and tokenizer __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def a__ ( self : str ) -> int: """simple docstring""" __lowercase = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) ) __lowercase = config.to_dict() __lowercase = config.to_diff_dict() __lowercase = config.to_json_string() def a__ ( self : Dict ) -> Tuple: """simple docstring""" from bitsandbytes.nn import Paramsabit __lowercase = self.model_fpaa.get_memory_footprint() __lowercase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowercase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def a__ ( self : Tuple ) -> str: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def a__ ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = BitsAndBytesConfig() __lowercase = True __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : str ) -> List[str]: """simple docstring""" with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" with self.assertRaises(_UpperCAmelCase ): # Tries with `str` 
self.model_abit.to('cpu' ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_fpaa.to(torch.floataa ) __lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowercase = self.model_fpaa.to('cpu' ) # Check this does not throw an error __lowercase = self.model_fpaa.half() # Check this does not throw an error __lowercase = self.model_fpaa.float() def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): @classmethod def a__ ( cls : int ) -> Tuple: """simple docstring""" __lowercase = 't5-small' __lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __lowercase = AutoTokenizer.from_pretrained(cls.model_name ) __lowercase = 'Translate in German: Hello, my dog is cute' def a__ ( self : List[Any] ) -> Dict: """simple docstring""" gc.collect() torch.cuda.empty_cache() def a__ ( self : int ) -> int: """simple docstring""" from transformers import TaForConditionalGeneration __lowercase = TaForConditionalGeneration._keep_in_fpaa_modules __lowercase = None # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) __lowercase = modules def a__ ( self : str ) -> Optional[Any]: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" super().setUp() # model_name __lowercase = 'bigscience/bloom-560m' __lowercase = 't5-small' # 
Different types of model __lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Sequence classification model __lowercase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # CausalLM model __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Seq2seq model __lowercase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : int ) -> List[str]: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> str: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( lowerCAmelCase__ ): def a__ ( self : str ) -> str: """simple docstring""" super().setUp() def a__ ( self : Dict ) -> Any: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> int: """simple docstring""" __lowercase = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowercase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().setUp() def a__ ( self : List[Any] ) -> int: """simple docstring""" __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class A__ ( lowerCAmelCase__ ): def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = 'facebook/opt-350m' super().setUp() def a__ ( self : Dict ) -> List[str]: """simple docstring""" if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowercase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __lowercase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): __lowercase = LoRALayer(module.q_proj , rank=16 ) __lowercase = LoRALayer(module.k_proj , rank=16 ) __lowercase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowercase = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Any = "gpt2-xl" lowerCAmelCase__ : str = 3.3191854854152187
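# A minimal 4-bit loading sketch covering what the tests above exercise; it
# needs a CUDA device plus bitsandbytes, and reuses the model id, prompt and
# generation length from the test constants.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"
quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=quant_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))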
325
1
import qiskit


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 2 ) -> qiskit.result.counts.Counts:
    __lowercase = qubits

    # Using Aer's simulator
    __lowercase = qiskit.Aer.get_backend('aer_simulator' )

    # Creating a Quantum Circuit acting on the q register
    __lowercase = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )

    for i in range(1 , SCREAMING_SNAKE_CASE ):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1 , SCREAMING_SNAKE_CASE )

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(SCREAMING_SNAKE_CASE ) ) , list(range(SCREAMING_SNAKE_CASE ) ) )

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    __lowercase = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1000 )

    return job.result().get_counts(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    print(F'''Total count for various states are: {quantum_entanglement(3)}''')
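# What to expect from the circuit above: the Hadamard plus the CNOT chain
# prepares a GHZ state, so every shot collapses to all zeros or all ones in
# roughly equal proportion (exact counts vary from run to run).
counts = quantum_entanglement(3)  # e.g. {'000': 509, '111': 491}
assert set(counts) <= {"000", "111"}
assert sum(counts.values()) == 1000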
325
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = 'gelu' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = 
TFConvBertModel(config=_UpperCAmelCase ) __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase__ : List[str] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : List[str] = False def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : int ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(_UpperCAmelCase , 'use_cache' ): __lowercase = True __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = model_class(_UpperCAmelCase ) __lowercase = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' ) __lowercase = tf.keras.models.load_model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = outputs['encoder_hidden_states'] __lowercase = outputs['encoder_attentions'] else: __lowercase = outputs['hidden_states'] __lowercase = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase : int ): __lowercase = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always 
last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
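A standalone version of the integration check above; the checkpoint name and expected shape are taken directly from the test:

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
# last_hidden_state: (batch, sequence, hidden) == (1, 6, 768)
print(model(input_ids)[0].shape)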
325
1
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
    __lowercase = str(bin(SCREAMING_SNAKE_CASE ) )
    binary_number += "0" * shift_amount
    return binary_number


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be non-negative integers' )
    __lowercase = str(bin(SCREAMING_SNAKE_CASE ) )[2:]
    if shift_amount >= len(SCREAMING_SNAKE_CASE ):
        return "0b0"
    __lowercase = binary_number[: len(SCREAMING_SNAKE_CASE ) - shift_amount]
    return "0b" + shifted_binary_number


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ) -> str:
    if number >= 0:
        # Get binary representation of positive number
        __lowercase = '0' + str(bin(SCREAMING_SNAKE_CASE ) ).strip('-' )[2:]
    else:
        # Get binary (2's complement) representation of negative number
        __lowercase = len(bin(SCREAMING_SNAKE_CASE )[3:] )
        # Find 2's complement of number
        __lowercase = bin(abs(SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:]
        __lowercase = (
            '1' + '0' * (binary_number_length - len(SCREAMING_SNAKE_CASE )) + binary_number
        )
    if shift_amount >= len(SCREAMING_SNAKE_CASE ):
        return "0b" + binary_number[0] * len(SCREAMING_SNAKE_CASE )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(SCREAMING_SNAKE_CASE ) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
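Illustrative checks of the three helpers, assuming their original names (`logical_left_shift`, `logical_right_shift`, `arithmetic_right_shift`); the last case shows the sign bit being replicated in the two's-complement view:

assert logical_left_shift(1, 1) == "0b10"
assert logical_right_shift(1024, 10) == "0b1"
# -8 >> 3 == -1, i.e. all ones in the 5-bit two's-complement encoding
assert arithmetic_right_shift(-8, 3) == "0b11111"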
325
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class A__ : def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" __lowercase = scheduler __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers] __lowercase = split_batches __lowercase = step_with_optimizer __lowercase = GradientState() def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __lowercase = AcceleratorState().num_processes for _ in range(_UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_last_lr() def a__ ( self : List[str] ) -> Tuple: """simple docstring""" return self.scheduler.state_dict() def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.scheduler.load_state_dict(_UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" return self.scheduler.get_lr() def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any: """simple docstring""" return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
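A minimal construction sketch, assuming the class's original name `AcceleratedScheduler`; in practice `Accelerator.prepare` builds this wrapper for you, so the direct instantiation below is for illustration only:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

# step_with_optimizer=False forwards .step() straight to the wrapped scheduler
scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
optimizer.step()
scheduler.step()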
325
1
from ...processing_utils import ProcessorMixin class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "WhisperFeatureExtractor" lowerCAmelCase__ : List[str] = "WhisperTokenizer" def __init__( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" super().__init__(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.feature_extractor __lowercase = False def a__ ( self : str , _UpperCAmelCase : Any=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[Any]=True ) -> Optional[Any]: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=_UpperCAmelCase , language=_UpperCAmelCase , no_timestamps=_UpperCAmelCase ) def __call__( self : Optional[int] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : str ) -> str: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase ) __lowercase = kwargs.pop('audio' , _UpperCAmelCase ) __lowercase = kwargs.pop('sampling_rate' , _UpperCAmelCase ) __lowercase = kwargs.pop('text' , _UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: __lowercase = args[0] __lowercase = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if audio is not None: __lowercase = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase ) if text is not None: __lowercase = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: __lowercase = encodings['input_ids'] return inputs def a__ ( self : Optional[int] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : int ) -> Tuple: """simple docstring""" return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Tuple , *_UpperCAmelCase : Any , **_UpperCAmelCase : Tuple ) -> Any: """simple docstring""" return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]="np" ) -> str: """simple docstring""" return self.tokenizer.get_prompt_ids(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
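A hedged usage sketch with a standard Hugging Face checkpoint; the silent one-second clip is a placeholder input:

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
print(inputs.input_features.shape)  # log-mel features, (1, 80, 3000)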
325
import collections import importlib.util import os import re from pathlib import Path SCREAMING_SNAKE_CASE__ = """src/transformers""" # Matches is_xxx_available() SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""") # Catches a line with else: SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""") def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None: return None __lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: __lowercase = f.readlines() __lowercase = 0 while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure __lowercase = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __lowercase = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ): __lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0] __lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: __lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __lowercase = lines[line_index] if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None: __lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' ) __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0] objects.extend(SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __lowercase = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __lowercase = [] while ( line_index < len(SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __lowercase = {'none': objects} # Let's continue with backend-specific objects while line_index < len(SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
__lowercase = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __lowercase = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __lowercase = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __lowercase = lines[line_index] __lowercase = _re_import.search(SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __lowercase = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int: def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ): return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __lowercase = [] for key in import_dict_objects.keys(): __lowercase = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) __lowercase = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __lowercase = 'base imports' if key == 'none' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __SCREAMING_SNAKE_CASE ( ) -> Tuple: __lowercase = [] for root, _, files in os.walk(SCREAMING_SNAKE_CASE ): if "__init__.py" in files: __lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) __lowercase = parse_init(SCREAMING_SNAKE_CASE ) if objects is not None: __lowercase = analyze_results(*SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('\n'.join(SCREAMING_SNAKE_CASE ) ) if len(SCREAMING_SNAKE_CASE ) > 0: raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) ) def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = [] for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0: continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace(os.path.sep , '.' ) submodules.append(SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) ) __lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' 
) if len(submodule.split('.' ) ) == 1: submodules.append(SCREAMING_SNAKE_CASE ) return submodules SCREAMING_SNAKE_CASE__ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", ] def __SCREAMING_SNAKE_CASE ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. __lowercase = importlib.util.spec_from_file_location( 'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) __lowercase = spec.loader.load_module() __lowercase = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(SCREAMING_SNAKE_CASE ) > 0: __lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' F"""{list_of_modules}\n""" 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
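To make the backend-parsing step concrete: the `is_xxx_available` pattern at the top of the file extracts every backend named on an `if not is_..._available()` line, and the `find_backend` helper joins them, sorted, with `_and_`. A self-contained re-creation of that behaviour:

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
line = "    if not (is_tf_available() and is_torch_available()):"
backends = sorted(b[0] for b in _re_backend.findall(line))
print("_and_".join(backends))  # -> "tf_and_torch"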
325
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) lowerCAmelCase__ : List[Any] = "CIDAS/clipseg-rd64-refined" lowerCAmelCase__ : Dict = "image_segmenter" lowerCAmelCase__ : Union[str, Any] = CLIPSegForImageSegmentation lowerCAmelCase__ : List[Any] = ["image", "text"] lowerCAmelCase__ : Union[str, Any] = ["image"] def __init__( self : Optional[Any] , *_UpperCAmelCase : int , **_UpperCAmelCase : int ) -> int: """simple docstring""" requires_backends(self , ['vision'] ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Dict , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ) -> Any: """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_UpperCAmelCase , return_tensors='pt' ) def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" with torch.no_grad(): __lowercase = self.model(**_UpperCAmelCase ).logits return logits def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = outputs.cpu().detach().numpy() __lowercase = 0 __lowercase = 1 return Image.fromarray((array * 2_55).astype(np.uinta ) )
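A hedged usage sketch; `ImageSegmentationTool` is the name this tool carries in the transformers agents API, and the image path and label are placeholders:

from PIL import Image

tool = ImageSegmentationTool()
mask = tool(image=Image.open("cat.png"), label="cat")
mask.save("cat_mask.png")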
325
import logging import os from .state import PartialState class A__ ( logging.LoggerAdapter ): @staticmethod def a__ ( _UpperCAmelCase : str ) -> Optional[Any]: """simple docstring""" __lowercase = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Optional[int]: """simple docstring""" if PartialState._shared_state == {}: raise RuntimeError( 'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' ) __lowercase = kwargs.pop('main_process_only' , _UpperCAmelCase ) __lowercase = kwargs.pop('in_order' , _UpperCAmelCase ) if self.isEnabledFor(_UpperCAmelCase ): if self._should_log(_UpperCAmelCase ): __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase ) self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) elif in_order: __lowercase = PartialState() for i in range(state.num_processes ): if i == state.process_index: __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase ) self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) state.wait_for_everyone() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ) -> Optional[Any]: if log_level is None: __lowercase = os.environ.get('ACCELERATE_LOG_LEVEL' , SCREAMING_SNAKE_CASE ) __lowercase = logging.getLogger(SCREAMING_SNAKE_CASE ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
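Typical usage of the adapter above; note the guard in `log` means the accelerate state must be initialized (for example by constructing an `Accelerator`) before anything is logged:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process only")
logger.info("printed by every rank, in order", main_process_only=False, in_order=True)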
325
1
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> str: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __lowercase = k.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if k.startswith('encoder' ): __lowercase = k.replace('.attn' , '.self_attn' ) __lowercase = k.replace('norm1' , 'self_attn_layer_norm' ) __lowercase = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): __lowercase = k.replace('norm1' , 'self_attn_layer_norm' ) __lowercase = k.replace('norm2' , 'encoder_attn_layer_norm' ) __lowercase = k.replace('norm3' , 'final_layer_norm' ) return k def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]: __lowercase = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: __lowercase = sd.pop(SCREAMING_SNAKE_CASE ) __lowercase = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd __lowercase = v SCREAMING_SNAKE_CASE__ = ["""START"""] @torch.no_grad() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple ) -> int: __lowercase = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' ) __lowercase = model['model'] __lowercase = BlenderbotConfig.from_json_file(SCREAMING_SNAKE_CASE ) __lowercase = BlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE ) __lowercase = m.model.state_dict().keys() __lowercase = [] __lowercase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __lowercase = rename_state_dict_key(SCREAMING_SNAKE_CASE ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __lowercase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(SCREAMING_SNAKE_CASE ) m.model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE ) m.half() m.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
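The effect of the key-renaming logic above, assuming the helper's original name `rename_state_dict_key`: ParlAI attention and projection names are mapped onto the Hugging Face `self_attn`/`*_proj` layout.

print(rename_state_dict_key("encoder.layers.0.attention.q_lin.weight"))
# -> "encoder.layers.0.self_attn.q_proj.weight"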
325
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: __lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False __lowercase = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __lowercase = [3, 3, 3, 3] __lowercase = [5, 5, 5, 5] elif "fl4" in model_name: __lowercase = [4, 4, 4, 4] __lowercase = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __lowercase = [3, 3, 3, 3] if "lrf" in model_name: __lowercase = [3, 3, 3, 3] else: __lowercase = [2, 2, 2, 2] if "tiny" in model_name: __lowercase = 96 elif "small" in model_name: __lowercase = 96 elif "base" in model_name: __lowercase = 128 elif "large" in model_name: __lowercase = 192 elif "xlarge" in model_name: __lowercase = 256 elif "huge" in model_name: __lowercase = 352 # set label information __lowercase = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: __lowercase = 'imagenet-22k-id2label.json' else: __lowercase = 'imagenet-1k-id2label.json' __lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = {v: k for k, v in idalabel.items()} __lowercase = FocalNetConfig( embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , ) return config def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict: if "patch_embed.proj" in name: __lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: __lowercase = 'encoder.' + name if "encoder.layers" in name: __lowercase = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: __lowercase = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: __lowercase = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __lowercase = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __lowercase = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __lowercase = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": __lowercase = 'layernorm.weight' if name == "norm.bias": __lowercase = 'layernorm.bias' if "head" in name: __lowercase = name.replace('head' , 'classifier' ) else: __lowercase = 'focalnet.' 
+ name return name def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]: # fmt: off __lowercase = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on __lowercase = model_name_to_url[model_name] print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE ) __lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): __lowercase = state_dict.pop(SCREAMING_SNAKE_CASE ) __lowercase = val __lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE ) __lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE ) model.eval() # load state dict model.load_state_dict(SCREAMING_SNAKE_CASE ) # verify conversion __lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowercase = BitImageProcessor( do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , ) __lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) __lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ) __lowercase = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 ) __lowercase = model(**SCREAMING_SNAKE_CASE ) __lowercase = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] ) elif model_name == "focalnet-tiny-lrf": __lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] ) elif model_name == "focalnet-small": __lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] ) elif model_name == "focalnet-small-lrf": __lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] ) elif model_name == "focalnet-base": 
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] ) elif model_name == "focalnet-base-lrf": __lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] ) assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""focalnet-tiny""", type=str, help="""Name of the FocalNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub.""", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
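An example invocation of the script (the filename is an assumption; adjust to wherever this conversion script actually lives):

# python convert_focalnet_to_hf_format.py \
#     --model_name focalnet-tiny \
#     --pytorch_dump_folder_path ./focalnet-tiny \
#     --push_to_hub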
325
1
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 SCREAMING_SNAKE_CASE__ = { """return_dict""": False, """output_hidden_states""": True, """output_attentions""": True, """torchscript""": True, """torch_dtype""": """float16""", """use_bfloat16""": True, """tf_legacy_loss""": True, """pruned_heads""": {"""a""": 1}, """tie_word_embeddings""": False, """is_decoder""": True, """cross_attention_hidden_size""": 128, """add_cross_attention""": True, """tie_encoder_decoder""": True, """max_length""": 50, """min_length""": 3, """do_sample""": True, """early_stopping""": True, """num_beams""": 3, """num_beam_groups""": 3, """diversity_penalty""": 0.5, """temperature""": 2.0, """top_k""": 10, """top_p""": 0.7, """typical_p""": 0.2, """repetition_penalty""": 0.8, """length_penalty""": 0.8, """no_repeat_ngram_size""": 5, """encoder_no_repeat_ngram_size""": 5, """bad_words_ids""": [1, 2, 3], """num_return_sequences""": 3, """chunk_size_feed_forward""": 5, """output_scores""": True, """return_dict_in_generate""": True, """forced_bos_token_id""": 2, """forced_eos_token_id""": 3, """remove_invalid_values""": True, """architectures""": ["""BertModel"""], """finetuning_task""": """translation""", """id2label""": {0: """label"""}, """label2id""": {"""label""": """0"""}, """tokenizer_class""": """BertTokenizerFast""", """prefix""": """prefix""", """bos_token_id""": 6, """pad_token_id""": 7, """eos_token_id""": 8, """sep_token_id""": 9, """decoder_start_token_id""": 10, """exponential_decay_length_penalty""": (5, 1.01), """suppress_tokens""": [0, 1], """begin_suppress_tokens""": 2, """task_specific_params""": {"""translation""": """some_params"""}, """problem_type""": """regression""", } @is_staging_test class A__ ( unittest.TestCase ): @classmethod def a__ ( cls : str ) -> Any: """simple docstring""" __lowercase = TOKEN HfFolder.save_token(_UpperCAmelCase ) @classmethod def a__ ( cls : Optional[int] ) -> List[Any]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-config' ) except HTTPError: pass def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('test-config' , use_auth_token=self._token ) __lowercase = BertConfig.from_pretrained(f"""{USER}/test-config""" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_UpperCAmelCase , repo_id='test-config' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token ) __lowercase = BertConfig.from_pretrained(f"""{USER}/test-config""" ) for k, v 
in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token ) __lowercase = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=_UpperCAmelCase , use_auth_token=self._token ) __lowercase = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" CustomConfig.register_for_auto_class() __lowercase = CustomConfig(attribute=42 ) config.push_to_hub('test-dynamic-config' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} ) __lowercase = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" , trust_remote_code=_UpperCAmelCase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' ) self.assertEqual(new_config.attribute , 42 ) class A__ ( unittest.TestCase ): def a__ ( self : Dict ) -> Tuple: """simple docstring""" __lowercase = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __lowercase = c.n_embd + 1 # int __lowercase = c.resid_pdrop + 1.0 # float __lowercase = not c.scale_attn_weights # bool __lowercase = c.summary_type + 'foo' # str c.update_from_string( f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" ) self.assertEqual(_UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' ) self.assertEqual(_UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' ) self.assertEqual(_UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' ) self.assertEqual(_UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' ) def a__ ( self : Dict ) -> List[str]: """simple docstring""" __lowercase = PretrainedConfig() __lowercase = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( _UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) __lowercase = [key for key, value in config_common_kwargs.items() if value == getattr(_UpperCAmelCase , _UpperCAmelCase )] if len(_UpperCAmelCase ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' f""" {", ".join(_UpperCAmelCase )}.""" ) def a__ ( self : Any ) -> Any: """simple docstring""" with self.assertRaises(_UpperCAmelCase ): # config is in subfolder, the following should not work without specifying the subfolder __lowercase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) __lowercase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" __lowercase = mock.Mock() __lowercase = 5_00 __lowercase = {} __lowercase = HTTPError __lowercase = {} # Download this model to make sure it's in the cache. __lowercase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=_UpperCAmelCase ) as mock_head: __lowercase = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = AutoConfig.from_pretrained('bert-base-cased' ) __lowercase = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(_UpperCAmelCase ) __lowercase = 2 json.dump(configuration.to_dict() , open(os.path.join(_UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __lowercase = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 __lowercase = ['config.42.0.0.json'] __lowercase = 7_68 configuration.save_pretrained(_UpperCAmelCase ) shutil.move(os.path.join(_UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(_UpperCAmelCase , 'config.42.0.0.json' ) ) __lowercase = AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 7_68 ) def a__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowercase = 'hf-internal-testing/test-two-configs' import transformers as new_transformers __lowercase = 'v4.0.0' __lowercase , __lowercase = new_transformers.models.auto.AutoConfig.from_pretrained( _UpperCAmelCase , return_unused_kwargs=_UpperCAmelCase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(_UpperCAmelCase , {} ) # Testing an older version by monkey-patching the version in the module it's used. 
import transformers as old_transformers __lowercase = 'v3.0.0' __lowercase = old_transformers.models.auto.AutoConfig.from_pretrained(_UpperCAmelCase ) self.assertEqual(old_configuration.hidden_size , 7_68 )
325
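A minimal sketch of the save/load round trip these configuration tests exercise, assuming a local `transformers` install; the volatile bookkeeping keys are skipped the same way the tests do:

import tempfile
from transformers import BertConfig

config = BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)                # writes config.json into tmp_dir
    reloaded = BertConfig.from_pretrained(tmp_dir)
for k, v in config.to_dict().items():
    if k not in ("transformers_version", "_name_or_path", "_commit_hash"):
        assert getattr(reloaded, k) == v           # every stable field survives the round trip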
import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE__ = { """facebook/mask2former-swin-small-coco-instance""": ( """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json""" ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "mask2former" lowerCAmelCase__ : List[Any] = ["swin"] lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"} def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' ) __lowercase = CONFIG_MAPPING['swin']( image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = backbone_config.pop('model_type' ) __lowercase = CONFIG_MAPPING[backbone_model_type] __lowercase = config_class.from_dict(_UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
""" f"""Supported model types: {",".join(self.backbones_supported )}""" ) __lowercase = backbone_config __lowercase = feature_size __lowercase = mask_feature_size __lowercase = hidden_dim __lowercase = encoder_feedforward_dim __lowercase = activation_function __lowercase = encoder_layers __lowercase = decoder_layers __lowercase = num_attention_heads __lowercase = dropout __lowercase = dim_feedforward __lowercase = pre_norm __lowercase = enforce_input_projection __lowercase = common_stride __lowercase = ignore_value __lowercase = num_queries __lowercase = no_object_weight __lowercase = class_weight __lowercase = mask_weight __lowercase = dice_weight __lowercase = train_num_points __lowercase = oversample_ratio __lowercase = importance_sample_ratio __lowercase = init_std __lowercase = init_xavier_std __lowercase = use_auxiliary_loss __lowercase = feature_strides __lowercase = output_auxiliary_logits __lowercase = decoder_layers super().__init__(**_UpperCAmelCase ) @classmethod def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict: """simple docstring""" return cls( backbone_config=_UpperCAmelCase , **_UpperCAmelCase , ) def a__ ( self : str ) -> Dict[str, any]: """simple docstring""" __lowercase = copy.deepcopy(self.__dict__ ) __lowercase = self.backbone_config.to_dict() __lowercase = self.__class__.model_type return output
325
1
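A short sketch of instantiating the configuration above; the readable attribute names mirror the public `Mask2FormerConfig` class, and the printed defaults are assumptions read off the constructor signature:

from transformers import Mask2FormerConfig

config = Mask2FormerConfig()                  # no backbone_config given, so the default Swin backbone is built
print(config.backbone_config.model_type)      # 'swin'
print(config.hidden_dim, config.num_queries)  # 256 and 100 per the defaults above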
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = 'gelu' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = 
TFConvBertModel(config=_UpperCAmelCase ) __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase__ : List[str] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : List[str] = False def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : int ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(_UpperCAmelCase , 'use_cache' ): __lowercase = True __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = model_class(_UpperCAmelCase ) __lowercase = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' ) __lowercase = tf.keras.models.load_model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = outputs['encoder_hidden_states'] __lowercase = outputs['encoder_attentions'] else: __lowercase = outputs['hidden_states'] __lowercase = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase : int ): __lowercase = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always 
last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
325
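The integration test at the end of that block pins down concrete output shapes; the same check can be reproduced directly, assuming Hub access and a TensorFlow install:

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
print(output.shape)  # (1, 6, 768), matching expected_shape in the slow test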
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: __lowercase = TOKENIZER_CLASSES else: __lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )} logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: __lowercase = TOKENIZER_CLASSES[tokenizer_name] __lowercase = True if checkpoint_name is None: __lowercase = list(tokenizer_class.max_model_input_sizes.keys() ) else: __lowercase = [checkpoint_name] logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer __lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE ) # Save fast tokenizer logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: __lowercase , __lowercase = checkpoint.split('/' ) __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif add_prefix: __lowercase = checkpoint __lowercase = dump_path else: __lowercase = None __lowercase = dump_path logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: __lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] __lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0] if next_char == "/": __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = None logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) __lowercase = tokenizer.save_pretrained( SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE ) logger.info(F"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(SCREAMING_SNAKE_CASE ) logger.info(F"""=> removing {file_name}""" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files.""" ) parser.add_argument( """--tokenizer_name""", default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' """download and convert all the checkpoints from AWS.""" ), ) parser.add_argument( """--checkpoint_name""", default=None, type=str, help="""Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.""", ) parser.add_argument( """--force_download""", action="""store_true""", help="""Re-download checkpoints.""", ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
325
1
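A hedged sketch of driving the converter programmatically; the public name `convert_slow_checkpoint_to_fast` is taken from the `__main__` call site above, and the tokenizer/checkpoint/path arguments are illustrative:

from transformers.convert_slow_tokenizers_checkpoints_to_fast import (
    convert_slow_checkpoint_to_fast,
)

# positional order follows the argparse block: tokenizer_name, checkpoint_name, dump_path, force_download
convert_slow_checkpoint_to_fast("BertTokenizer", "bert-base-uncased", "/tmp/fast_tokenizers", False)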
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) SCREAMING_SNAKE_CASE__ = pytest.mark.integration @pytest.mark.parametrize('path' , ['paws', 'csv'] ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : str ) -> List[Any]: inspect_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = path + '.py' assert script_name in os.listdir(SCREAMING_SNAKE_CASE ) assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE ) @pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' ) @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' ) @pytest.mark.parametrize('path' , ['accuracy'] ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: inspect_metric(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = path + '.py' assert script_name in os.listdir(SCREAMING_SNAKE_CASE ) assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'path, config_name, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int ) -> Optional[int]: __lowercase = get_dataset_config_info(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> List[str]: with pytest.raises(SCREAMING_SNAKE_CASE ): get_dataset_config_info(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'path, expected' , [ ('squad', 'plain_text'), ('acronym_identification', 'default'), ('lhoestq/squad', 'plain_text'), ('lhoestq/test', 'default'), ('lhoestq/demo1', 'lhoestq--demo1'), ('dalle-mini/wit', 'dalle-mini--wit'), ] , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Any: __lowercase = get_dataset_config_names(SCREAMING_SNAKE_CASE ) assert expected in config_names @pytest.mark.parametrize( 'path, expected_configs, expected_splits_in_first_config' , [ ('squad', ['plain_text'], ['train', 'validation']), ('dalle-mini/wit', ['dalle-mini--wit'], ['train']), ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']), ] , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: __lowercase = get_dataset_infos(SCREAMING_SNAKE_CASE ) assert list(infos.keys() ) == expected_configs __lowercase = expected_configs[0] assert expected_config in infos __lowercase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( 'path, expected_config, expected_splits' , [ ('squad', 'plain_text', ['train', 'validation']), ('dalle-mini/wit', 'dalle-mini--wit', ['train']), ('paws', 'labeled_final', ['train', 'test', 'validation']), ] , ) def __SCREAMING_SNAKE_CASE ( 
SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: __lowercase = get_dataset_infos(SCREAMING_SNAKE_CASE ) assert expected_config in infos __lowercase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( 'path, config_name, expected_exception' , [ ('paws', None, ValueError), ] , ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: with pytest.raises(SCREAMING_SNAKE_CASE ): get_dataset_split_names(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE )
325
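The inspection helpers under test can also be exercised interactively; the printed values are examples and depend on the installed `datasets` version and Hub state:

from datasets import get_dataset_config_names, get_dataset_split_names

print(get_dataset_config_names("squad"))               # e.g. ['plain_text']
print(get_dataset_split_names("squad", "plain_text"))  # e.g. ['train', 'validation']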
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, 
TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
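A small sketch of what the `_LazyModule` indirection buys: importing `transformers` stays cheap, and the framework-specific submodule is only loaded on first attribute access:

import transformers

config_cls = transformers.XLMRobertaConfig  # first access triggers the lazy import of configuration_xlm_roberta
config = config_cls()
print(config.model_type)                    # 'xlm-roberta'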
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
325
1
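A usage sketch matching the tokenization test above; it assumes `sentencepiece` is installed and uses the checkpoint named in the slow test:

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-mustc-en-de-st")
print(tokenizer.tokenize("This is a test"))  # subword pieces such as ['▁This', '▁is', '▁a', ...]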
from typing import TYPE_CHECKING from ...utils import _LazyModule SCREAMING_SNAKE_CASE__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
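The stub above lazily exposes a single tokenizer class; a sketch of loading it, assuming the `phonemizer` backend (with espeak) is available and treating the checkpoint name as illustrative:

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
print(tokenizer("hello world").input_ids[:5])  # phoneme token ids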
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "layoutlmv3" def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) __lowercase = max_ad_position_embeddings __lowercase = coordinate_size __lowercase = shape_size __lowercase = has_relative_attention_bias __lowercase = rel_pos_bins __lowercase = max_rel_pos __lowercase = has_spatial_attention_bias __lowercase = rel_ad_pos_bins __lowercase = max_rel_ad_pos __lowercase = text_embed __lowercase = visual_embed __lowercase = input_size __lowercase = num_channels __lowercase = patch_size __lowercase = classifier_dropout class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = version.parse("1.12" ) @property def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def a__ ( self : int ) -> float: """simple docstring""" 
return 1e-5 @property def a__ ( self : str ) -> int: """simple docstring""" return 12 def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowercase = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = dict( processor( _UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) ) return inputs
325
1
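A sketch of the defaults encoded by the configuration above; the readable attribute names follow the public `LayoutLMv3Config` class, and the values are the constructor defaults:

from transformers import LayoutLMv3Config

config = LayoutLMv3Config()
print(config.max_2d_position_embeddings)     # 1024
print(config.input_size, config.patch_size)  # 224, 16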
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
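# --- Illustrative sketch (not part of the original file) ---
# A minimal standalone version of the Conv -> BatchNorm -> activation block that
# RegNetConvLayer above implements, assuming only that `torch` is installed. The
# class name `ConvBNAct` and its defaults are hypothetical, for illustration only.
import torch
from torch import nn


class ConvBNAct(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1):
        super().__init__()
        # "same" padding for odd kernel sizes, matching the kernel_size // 2 trick above
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, hidden_state):
        return self.activation(self.normalization(self.convolution(hidden_state)))


if __name__ == "__main__":
    layer = ConvBNAct(3, 32, stride=2)
    print(layer(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 32, 112, 112])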
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(F'''{solution() = }''')
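# --- Illustrative sketch (not part of the original file) ---
# A minimal O(n^2) brute-force cross-check of the closed-form search above; the
# helper name is hypothetical. For N = 1000 both return 31875000
# (a = 200, b = 375, c = 425).
def brute_force_solution(n: int = 1000) -> int:
    product = -1
    for a in range(1, n // 3):
        for b in range(a + 1, n // 2):
            c = n - a - b
            if c > b and a * a + b * b == c * c:
                product = max(product, a * b * c)
    return product


if __name__ == "__main__":
    assert brute_force_solution() == solution()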
from __future__ import annotations


def minimum_cost_path(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
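# --- Illustrative usage (not part of the original file) ---
# Worked example: with moves restricted to right/down, the cheapest path through
# the grid below is 1 -> 3 -> 1 -> 1 -> 1 with total cost 7. Note that the
# function mutates its argument in place.
if __name__ == "__main__":
    print(minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7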
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class A__ : def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" __lowercase = scheduler __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers] __lowercase = split_batches __lowercase = step_with_optimizer __lowercase = GradientState() def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __lowercase = AcceleratorState().num_processes for _ in range(_UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_last_lr() def a__ ( self : List[str] ) -> Tuple: """simple docstring""" return self.scheduler.state_dict() def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.scheduler.load_state_dict(_UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" return self.scheduler.get_lr() def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any: """simple docstring""" return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
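# --- Illustrative sketch (not part of the original file) ---
# When `split_batches` is False, the wrapper above steps the wrapped scheduler
# `num_processes` times per training step. A minimal standalone demonstration of
# that idea with plain `torch`, assuming torch is installed; `num_processes` here
# is a stand-in for `AcceleratorState().num_processes`.
import torch

if __name__ == "__main__":
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=1.0)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)
    num_processes = 2
    for _ in range(2):  # two "training steps"
        optimizer.step()
        for _ in range(num_processes):  # one scheduler step per process
            scheduler.step()
    print(scheduler.get_last_lr())  # [0.1] after 4 scheduler steps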
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) class A__ ( enum.Enum ): lowerCAmelCase__ : Dict = "all_checks" lowerCAmelCase__ : List[Any] = "basic_checks" lowerCAmelCase__ : Dict = "no_checks" class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]: if expected_checksums is None: logger.info('Unable to verify checksums.' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __lowercase = ' for ' + verification_name if verification_name is not None else '' if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingChecksumError( F"""Checksums didn't match{for_verification_name}:\n""" F"""{bad_urls}\n""" 'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' ) logger.info('All the checksums matched successfully' + for_verification_name ) class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict ) -> Optional[int]: if expected_splits is None: logger.info('Unable to verify splits sizes.' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __lowercase = [ {'expected': expected_splits[name], 'recorded': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) ) logger.info('All the splits matched successfully.' ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = True ) -> dict: if record_checksum: __lowercase = shaaaa() with open(SCREAMING_SNAKE_CASE , 'rb' ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , b'' ): m.update(SCREAMING_SNAKE_CASE ) __lowercase = m.hexdigest() else: __lowercase = None return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict: if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
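# --- Illustrative sketch (not part of the original file) ---
# A standalone version of the size/checksum record computed above (the obfuscated
# function name is rebound several times in this file, so a fresh helper is used
# here). `record_file_info` is a hypothetical name; only the standard library is used.
import os
from hashlib import sha256


def record_file_info(path: str, record_checksum: bool = True) -> dict:
    checksum = None
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"hello world")
    print(record_file_info(f.name))  # num_bytes == 11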
from __future__ import annotations class A__ : def __init__( self : Optional[Any] , _UpperCAmelCase : int = 0 ) -> Dict: """simple docstring""" __lowercase = key def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> list[str]: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = key or self.__key or 1 # make sure key is an appropriate size key %= 2_55 return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content] def a__ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> list[str]: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = key or self.__key or 1 # make sure key is an appropriate size key %= 2_55 return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content] def a__ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : int = 0 ) -> str: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = key or self.__key or 1 # make sure key can be any size while key > 2_55: key -= 2_55 # This will be returned __lowercase = '' for ch in content: ans += chr(ord(_UpperCAmelCase ) ^ key ) return ans def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int = 0 ) -> str: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = key or self.__key or 1 # make sure key can be any size while key > 2_55: key -= 2_55 # This will be returned __lowercase = '' for ch in content: ans += chr(ord(_UpperCAmelCase ) ^ key ) return ans def a__ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int = 0 ) -> bool: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) try: with open(_UpperCAmelCase ) as fin, open('encrypt.out' , 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(_UpperCAmelCase , _UpperCAmelCase ) ) except OSError: return False return True def a__ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> bool: """simple docstring""" assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ) try: with open(_UpperCAmelCase ) as fin, open('decrypt.out' , 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(_UpperCAmelCase , _UpperCAmelCase ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
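# --- Illustrative sketch (not part of the original file) ---
# A standalone demonstration of the XOR cipher's symmetry (encrypting twice with
# the same key returns the original text). The helper name is hypothetical; the
# class above cannot be exercised directly because all of its methods share one
# obfuscated name.
def xor_string(content: str, key: int = 67) -> str:
    key %= 255  # keep the key in single-byte range, as the methods above do
    return "".join(chr(ord(ch) ^ key) for ch in content)


if __name__ == "__main__":
    message = "hallo welt"
    assert xor_string(xor_string(message)) == message
    print(xor_string(message))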
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
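# --- Illustrative usage (not part of the original file) ---
# Quick sanity checks for the helpers above.
if __name__ == "__main__":
    print(is_prime(97))  # True
    print(next_prime(10))  # 11
    print(next_prime(13))  # 17 -- an already-prime input skips ahead to the next prime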
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = torch.device("""cpu""") def __SCREAMING_SNAKE_CASE ( ) -> Dict: __lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return im def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __lowercase = dct.pop(SCREAMING_SNAKE_CASE ) __lowercase = val def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> str: __lowercase = [] for k in state_dict.keys(): __lowercase = k if ".pwconv" in k: __lowercase = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __lowercase = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __lowercase = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __lowercase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __lowercase = k_new.split('.' ) if ls[2].isdigit(): __lowercase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: __lowercase = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: __lowercase = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __lowercase = 1000 __lowercase = 'huggingface/label-files' __lowercase = 'imagenet-1k-id2label.json' __lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) __lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} __lowercase = idalabel __lowercase = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __lowercase = [3, 3, 6, 4] __lowercase = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __lowercase = [3, 3, 9, 6] __lowercase = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __lowercase = [4, 3, 10, 5] __lowercase = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __lowercase = [4, 4, 12, 6] __lowercase = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=SCREAMING_SNAKE_CASE ) else: __lowercase = torch.load(SCREAMING_SNAKE_CASE , map_location='cpu' ) __lowercase = checkpoint __lowercase = create_rename_keys(SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # load HuggingFace model __lowercase = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE ).eval() hf_model.load_state_dict(SCREAMING_SNAKE_CASE ) # prepare test inputs __lowercase = prepare_img() __lowercase = ViTImageProcessor.from_pretrained('preprocessor_config' ) __lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ) # compare outputs from both models __lowercase = get_expected_output(SCREAMING_SNAKE_CASE ) __lowercase = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
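# --- Illustrative sketch (not part of the original file) ---
# The conversion above builds (old, new) key pairs and then moves tensors between
# state-dict entries. A minimal standalone illustration with plain dicts; the
# names `rename_key` and `state_dict` here are hypothetical stand-ins.
def rename_key(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)


if __name__ == "__main__":
    state_dict = {"patch_embed.weight": 1, "network.0.weight": 2}
    for old, new in [
        ("patch_embed.weight", "swiftformer.patch_embed.patch_embedding.weight"),
        ("network.0.weight", "swiftformer.encoder.network.0.weight"),
    ]:
        rename_key(state_dict, old, new)
    print(sorted(state_dict))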
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = 
processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 
SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
import comet # From: unbabel-comet import torch import datasets SCREAMING_SNAKE_CASE__ = datasets.logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = """\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel's Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = \"{COMET}: A Neural Framework for {MT} Evaluation\", author = \"Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon\", booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\", month = nov, year = \"2020\", address = \"Online\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\", pages = \"2685--2702\", } """ SCREAMING_SNAKE_CASE__ = """\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. """ SCREAMING_SNAKE_CASE__ = """ COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric('comet') >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"] >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"] >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results[\"scores\"]]) [0.19, 0.92] """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'sources': datasets.Value('string' , id='sequence' ), 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[ 'https://github.com/Unbabel/COMET', 'https://www.aclweb.org/anthology/2020.emnlp-main.213/', 'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6', ] , ) def a__ ( self : str , _UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" if self.config_name == "default": __lowercase = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) ) else: __lowercase = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def a__ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=False ) -> List[str]: """simple docstring""" if gpus is None: __lowercase = 1 if torch.cuda.is_available() else 0 __lowercase = {'src': sources, 'mt': predictions, 'ref': references} __lowercase = [dict(zip(_UpperCAmelCase , _UpperCAmelCase ) ) for t in zip(*data.values() )] __lowercase , __lowercase = self.scorer.predict(_UpperCAmelCase , gpus=_UpperCAmelCase , progress_bar=_UpperCAmelCase ) return {"mean_score": mean_score, "scores": scores}
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available SCREAMING_SNAKE_CASE__ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
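# --- Illustrative sketch (not part of the original file) ---
# The `_LazyModule` pattern above defers the real import until an attribute is
# first accessed. A minimal standalone version using `importlib`, with a
# hypothetical wrapper name; it only demonstrates the idea, not the real class.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target
        self._module = None

    def __getattr__(self, attr):
        # only reached when normal attribute lookup fails
        if self._module is None:
            self._module = importlib.import_module(self._target)  # import on first use
        return getattr(self._module, attr)


if __name__ == "__main__":
    lazy_json = LazyModule("lazy_json", "json")
    print(lazy_json.dumps({"ok": True}))  # the real `json` module is imported only here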
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(n: int = 1000) -> int:
    triplets = pythagorean_triple(n)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(F'''Perimeter {solution()} has maximum solutions''')
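# --- Illustrative usage (not part of the original file) ---
# For max_perimeter = 120 the answer is 120, which is formed by three triangles:
# (30, 40, 50), (20, 48, 52) and (24, 45, 51).
if __name__ == "__main__":
    print(solution(120))  # 120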
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = "transfo-xl" lowerCAmelCase__ : int = ["mems"] lowerCAmelCase__ : Dict = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple: """simple docstring""" __lowercase = vocab_size __lowercase = [] self.cutoffs.extend(_UpperCAmelCase ) if proj_share_all_but_first: __lowercase = [False] + [True] * len(self.cutoffs ) else: __lowercase = [False] + [False] * len(self.cutoffs ) __lowercase = d_model __lowercase = d_embed __lowercase = d_head __lowercase = d_inner __lowercase = div_val __lowercase = pre_lnorm __lowercase = n_layer __lowercase = n_head __lowercase = mem_len __lowercase = same_length __lowercase = attn_type __lowercase = clamp_len __lowercase = sample_softmax __lowercase = adaptive __lowercase = dropout __lowercase = dropatt __lowercase = untie_r __lowercase = init __lowercase = init_range __lowercase = proj_init_std __lowercase = init_std __lowercase = layer_norm_epsilon super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def a__ ( self : Tuple ) -> Any: """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE__ = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> str: if "://" in dataset_path: __lowercase = dataset_path.split('://' )[1] return dataset_path def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : fsspec.AbstractFileSystem , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: __lowercase = not is_remote_filesystem(SCREAMING_SNAKE_CASE ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(SCREAMING_SNAKE_CASE ) , fs._strip_protocol(SCREAMING_SNAKE_CASE ) ) else: fs.mv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , recursive=SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( ) -> None: if hasattr(fsspec.asyn , 'reset_lock' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: __lowercase = None __lowercase = None __lowercase = threading.Lock()
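# --- Illustrative sketch (not part of the original file) ---
# A standalone version of the first helper above, which strips the protocol
# prefix from a dataset path; the function name is hypothetical.
def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


if __name__ == "__main__":
    print(extract_path_from_uri("s3://my-bucket/datasets/train"))  # my-bucket/datasets/train
    print(extract_path_from_uri("relative/local/path"))  # unchanged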
import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } SCREAMING_SNAKE_CASE__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: for attribute in key.split('.' ): __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: __lowercase = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase = value elif weight_type == "weight_g": __lowercase = value elif weight_type == "weight_v": __lowercase = value elif weight_type == "bias": __lowercase = value else: __lowercase = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: __lowercase = [] __lowercase = fairseq_model.state_dict() __lowercase = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight __lowercase = None for name, value in fairseq_dict.items(): __lowercase = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) __lowercase = True elif name.split('.' )[0] == "proj": __lowercase = fairseq_model.proj __lowercase = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __lowercase = True if "*" in mapped_key: __lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] __lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: __lowercase = 'weight_g' elif "weight_v" in name: __lowercase = 'weight_v' elif "bias" in name: __lowercase = 'bias' elif "weight" in name: __lowercase = 'weight' else: __lowercase = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) return proj_weight def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]: __lowercase = full_name.split('conv_layers.' )[-1] __lowercase = name.split('.' ) __lowercase = int(items[0] ) __lowercase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: __lowercase , __lowercase = emb.weight.shape __lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) __lowercase = emb.weight.data return lin_layer def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __lowercase = f.readlines() __lowercase = [line.split(' ' )[0] for line in lines] __lowercase = len(SCREAMING_SNAKE_CASE ) __lowercase = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]: __lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaConfig.from_pretrained( SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE ) __lowercase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) __lowercase = model[0].eval() # set weights for wav2vec2 encoder __lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE ) __lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE ) __lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE ) # set output linear layer unexpected_keys.remove('embed_out' ) __lowercase = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" ) logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" ) __lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) __lowercase = False # add projection layer __lowercase = nn.Parameter(projection_layer.weight ) __lowercase = nn.Parameter(projection_layer.bias ) __lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE ) with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) __lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) __lowercase 
= hf_wavavec.config.to_dict() __lowercase = tokenizer.pad_token_id __lowercase = tokenizer.bos_token_id __lowercase = tokenizer.eos_token_id __lowercase = 'speech_to_text_2' __lowercase = 'wav2vec2' __lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
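# --- Illustrative sketch (not part of the original file) ---
# The embedding-to-linear step in the conversion above produces an output
# projection that reuses the embedding matrix (weight tying). A standalone sketch
# with plain torch, assuming torch is installed; the shapes are illustrative.
import torch
from torch import nn

if __name__ == "__main__":
    emb = nn.Embedding(100, 16)  # vocab_size x hidden
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the embedding weights
    logits = lin_layer(torch.randn(2, emb_size))
    print(logits.shape)  # torch.Size([2, 100])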
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar("""T""") class A__ ( Generic[T] ): def __init__( self : int , _UpperCAmelCase : bool = True ) -> None: """simple docstring""" __lowercase = {} # dictionary of lists __lowercase = directed def a__ ( self : Optional[Any] , _UpperCAmelCase : T , _UpperCAmelCase : T ) -> GraphAdjacencyList[T]: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase ) self.adj_list[destination_vertex].append(_UpperCAmelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase ) __lowercase = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(_UpperCAmelCase ) __lowercase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: __lowercase = [destination_vertex] __lowercase = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase ) __lowercase = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: __lowercase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: __lowercase = [destination_vertex] __lowercase = [] return self def __repr__( self : Any ) -> str: """simple docstring""" return pformat(self.adj_list )
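# --- Illustrative sketch (not part of the original file) ---
# The class above maintains a `{vertex: [adjacent vertices]}` mapping. A minimal
# standalone demonstration of the same bookkeeping for an undirected graph,
# using a `defaultdict` instead of the explicit case analysis above.
from collections import defaultdict

if __name__ == "__main__":
    adj_list = defaultdict(list)
    for u, v in [(1, 2), (2, 3), (1, 3)]:
        adj_list[u].append(v)  # undirected: record the edge in both directions
        adj_list[v].append(u)
    print(dict(adj_list))  # {1: [2, 3], 2: [1, 3], 3: [2, 1]}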
325
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
325
1
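# A quick usage sketch for the dict-of-lists adjacency representation built by
# the graph class above, plus a breadth-first traversal showing why that layout
# is convenient. This stand-in builds the dict directly rather than using the
# sample's class.
from collections import deque


def bfs(adj: dict, start) -> list:
    seen, order, queue = {start}, [], deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbor in adj.get(node, []):
            if neighbor not in seen:
                seen.add(neighbor)
                queue.append(neighbor)
    return order


adj: dict = {}
for u, v in [(1, 2), (1, 3), (2, 4), (3, 4)]:
    # undirected insertion: mirror every edge, as the sample does when directed=False
    adj.setdefault(u, []).append(v)
    adj.setdefault(v, []).append(u)
print(bfs(adj, 1))  # [1, 2, 3, 4]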
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ = { """configuration_jukebox""": [ """JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """JukeboxConfig""", """JukeboxPriorConfig""", """JukeboxVQVAEConfig""", ], """tokenization_jukebox""": ["""JukeboxTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ """JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST""", """JukeboxModel""", """JukeboxPreTrainedModel""", """JukeboxVQVAE""", """JukeboxPrior""", ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Union[str, Any] = ["vqvae"] def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str: """simple docstring""" super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase ) def a__ ( self : Tuple ) -> int: """simple docstring""" return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00 @torch.no_grad() def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: """simple docstring""" __lowercase = steps or self.get_default_steps() self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: __lowercase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: __lowercase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=_UpperCAmelCase , device=self.device , ) __lowercase = noise __lowercase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase ) __lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape( (input_image.height, input_image.width) ) __lowercase = (input_image / 2_55) * 2 - 1 __lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: __lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample( generator=_UpperCAmelCase )[0] __lowercase = self.vqvae.config.scaling_factor * input_images if start_step > 0: __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] ) __lowercase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) __lowercase = int(mask_start_secs * pixels_per_second ) __lowercase = int(mask_end_secs * pixels_per_second ) __lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , _UpperCAmelCase ): __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample'] else: __lowercase = self.unet(_UpperCAmelCase , 
_UpperCAmelCase )['sample'] if isinstance(self.scheduler , _UpperCAmelCase ): __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] else: __lowercase = self.scheduler.step( model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample'] if mask is not None: if mask_start > 0: __lowercase = mask[:, step, :, :mask_start] if mask_end > 0: __lowercase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance __lowercase = 1 / self.vqvae.config.scaling_factor * images __lowercase = self.vqvae.decode(_UpperCAmelCase )['sample'] __lowercase = (images / 2 + 0.5).clamp(0 , 1 ) __lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() __lowercase = (images * 2_55).round().astype('uint8' ) __lowercase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) ) __lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) ) @torch.no_grad() def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray: """simple docstring""" assert isinstance(self.scheduler , _UpperCAmelCase ) self.scheduler.set_timesteps(_UpperCAmelCase ) __lowercase = np.array( [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) __lowercase = (sample / 2_55) * 2 - 1 __lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): __lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps __lowercase = self.scheduler.alphas_cumprod[t] __lowercase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) __lowercase = 1 - alpha_prod_t __lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample'] __lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output __lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) __lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor: """simple docstring""" __lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
325
1
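# The jukebox __init__ above routes imports through transformers' _LazyModule so
# heavy submodules load only on first attribute access. A bare-bones sketch of
# the same idea using PEP 562 module-level __getattr__; this would live inside a
# package's __init__.py, and the attribute-to-submodule mapping is illustrative,
# not transformers' actual implementation.
import importlib

_LAZY_ATTRS = {
    "JukeboxConfig": ".configuration_jukebox",
    "JukeboxModel": ".modeling_jukebox",
}


def __getattr__(name: str):
    if name in _LAZY_ATTRS:
        # import the submodule lazily, then pull the requested symbol off it
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")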
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[torch.FloatTensor] = None lowerCAmelCase__ : torch.FloatTensor = None lowerCAmelCase__ : Optional[Tuple[torch.FloatTensor]] = None lowerCAmelCase__ : Optional[Tuple[torch.FloatTensor]] = None class A__ ( lowerCAmelCase__ ): def __init__( self : int , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : int=2 , _UpperCAmelCase : List[Any]=5_12 , _UpperCAmelCase : str="cls" , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : Any , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) __lowercase = project_dim __lowercase = pooler_fn __lowercase = learn_encoder __lowercase = use_attention_mask class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Dict = [R"pooler", R"logit_scale"] lowerCAmelCase__ : Any = [R"position_ids", R"predictions.decoder.bias"] lowerCAmelCase__ : Dict = "roberta" lowerCAmelCase__ : List[str] = RobertaSeriesConfig def __init__( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = XLMRobertaModel(_UpperCAmelCase ) __lowercase = nn.Linear(config.hidden_size , config.project_dim ) __lowercase = getattr(_UpperCAmelCase , 'has_pre_transformation' , _UpperCAmelCase ) if self.has_pre_transformation: __lowercase = nn.Linear(config.hidden_size , config.project_dim ) __lowercase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[torch.Tensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> Dict: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.base_model( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_UpperCAmelCase , ) if self.has_pre_transformation: __lowercase = outputs['hidden_states'][-2] __lowercase = self.pre_LN(_UpperCAmelCase ) __lowercase = self.transformation_pre(_UpperCAmelCase ) return TransformationModelOutput( projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __lowercase = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , 
hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
325
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
325
1
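# A compact, self-contained ternary search with a randomized check against
# membership in the sorted input, illustrating the split-into-thirds invariant
# used by the sample above (this is a condensed re-implementation, not the
# sample's code).
import random


def ternary_search(array: list[int], target: int) -> int:
    left, right = 0, len(array) - 1
    while left <= right:
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            right = one_third - 1       # target lies in the left third
        elif target > array[two_third]:
            left = two_third + 1        # target lies in the right third
        else:
            left, right = one_third + 1, two_third - 1  # middle third
    return -1


for _ in range(1000):
    data = sorted(random.sample(range(10_000), k=64))
    target = random.choice(data)
    assert data[ternary_search(data, target)] == target
    assert ternary_search(data, data[0] - 1) == -1  # absent element
print("randomized checks passed")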
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    # normal_gradient = gradient of the line through which the beam is reflected
    # outgoing_gradient = gradient of the reflected line
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point; keep the other one
    next_x = x_minus if not isclose(x_minus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
325
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]: if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = module __lowercase = nn.Sequential( nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , ) __lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]: """simple docstring""" return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module lowerCAmelCase__ : int = "bigscience/bloom-1b7" # Constant values lowerCAmelCase__ : Any = 2.109659552692574 lowerCAmelCase__ : str = "Hello my name is" lowerCAmelCase__ : Any = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) lowerCAmelCase__ : List[Any] = 10 def a__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __lowercase = AutoTokenizer.from_pretrained(self.model_name ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" super().setUp() # Models and tokenizer __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : Any ) -> Optional[Any]: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def a__ ( self : str ) -> int: """simple docstring""" __lowercase = self.model_abit.config self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) ) __lowercase = config.to_dict() __lowercase = config.to_diff_dict() __lowercase = config.to_json_string() def a__ ( self : Dict ) -> Tuple: """simple docstring""" from bitsandbytes.nn import Paramsabit __lowercase = self.model_fpaa.get_memory_footprint() __lowercase = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) __lowercase = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def a__ ( self : Tuple ) -> str: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_UpperCAmelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def a__ ( self : List[str] ) -> str: """simple docstring""" __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : Union[str, Any] ) -> str: """simple docstring""" __lowercase = BitsAndBytesConfig() __lowercase = True __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) def a__ ( self : str ) -> List[str]: """simple docstring""" with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __lowercase = BitsAndBytesConfig() with self.assertRaises(_UpperCAmelCase ): __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" with self.assertRaises(_UpperCAmelCase ): # Tries with `str` 
self.model_abit.to('cpu' ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_UpperCAmelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) __lowercase = self.model_fpaa.to(torch.floataa ) __lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error __lowercase = self.model_fpaa.to('cpu' ) # Check this does not throw an error __lowercase = self.model_fpaa.half() # Check this does not throw an error __lowercase = self.model_fpaa.float() def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class A__ ( unittest.TestCase ): @classmethod def a__ ( cls : int ) -> Tuple: """simple docstring""" __lowercase = 't5-small' __lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense __lowercase = AutoTokenizer.from_pretrained(cls.model_name ) __lowercase = 'Translate in German: Hello, my dog is cute' def a__ ( self : List[Any] ) -> Dict: """simple docstring""" gc.collect() torch.cuda.empty_cache() def a__ ( self : int ) -> int: """simple docstring""" from transformers import TaForConditionalGeneration __lowercase = TaForConditionalGeneration._keep_in_fpaa_modules __lowercase = None # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) __lowercase = modules def a__ ( self : str ) -> Optional[Any]: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) # test with `flan-t5-small` __lowercase = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) __lowercase = model.generate(**_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" super().setUp() # model_name __lowercase = 'bigscience/bloom-560m' __lowercase = 't5-small' # 
Different types of model __lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Sequence classification model __lowercase = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # CausalLM model __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) # Seq2seq model __lowercase = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' ) def a__ ( self : int ) -> List[str]: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> str: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class A__ ( lowerCAmelCase__ ): def a__ ( self : str ) -> str: """simple docstring""" super().setUp() def a__ ( self : Dict ) -> Any: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def a__ ( self : Tuple ) -> int: """simple docstring""" __lowercase = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass __lowercase = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class A__ ( lowerCAmelCase__ ): def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" super().setUp() def a__ ( self : List[Any] ) -> int: """simple docstring""" __lowercase = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch __lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS ) class A__ ( lowerCAmelCase__ ): def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = 'facebook/opt-350m' super().setUp() def a__ ( self : Dict ) -> List[str]: """simple docstring""" if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): __lowercase = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability __lowercase = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_UpperCAmelCase ) ): __lowercase = LoRALayer(module.q_proj , rank=16 ) __lowercase = LoRALayer(module.k_proj , rank=16 ) __lowercase = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch __lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): __lowercase = model.forward(**_UpperCAmelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_UpperCAmelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Any = "gpt2-xl" lowerCAmelCase__ : str = 3.3191854854152187
325
1
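# The tests above exercise transformers' 4-bit loading path. A minimal usage
# sketch under the assumption of a CUDA GPU with bitsandbytes and accelerate
# installed; the model id comes from the tests, and the nf4 quant type appears
# there as well, but the exact kwargs below are one reasonable configuration,
# not the tests' setup.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-560m"
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, quantization_config=quant_config, device_map="auto"
)
inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=10)[0]))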
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: return (preds == labels).mean() @dataclass class A__ : lowerCAmelCase__ : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCAmelCase__ : Optional[str] = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class A__ : lowerCAmelCase__ : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) lowerCAmelCase__ : str = field(metadata={"help": "Should contain the data files for the task."} ) lowerCAmelCase__ : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCAmelCase__ : bool = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __SCREAMING_SNAKE_CASE ( ) -> Tuple: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) try: __lowercase = processors[data_args.task_name]() __lowercase = processor.get_labels() __lowercase = len(SCREAMING_SNAKE_CASE ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __lowercase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) __lowercase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowercase = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets __lowercase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __lowercase = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(SCREAMING_SNAKE_CASE : EvalPrediction ) -> Dict: __lowercase = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(SCREAMING_SNAKE_CASE , p.label_ids )} # Data collator __lowercase = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowercase = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase = trainer.evaluate() __lowercase = 
os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) results.update(SCREAMING_SNAKE_CASE ) return results def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
325
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]: """simple docstring""" __lowercase = parent __lowercase = 13 __lowercase = 7 __lowercase = True __lowercase = True __lowercase = True __lowercase = True __lowercase = 99 __lowercase = 3_84 __lowercase = 2 __lowercase = 4 __lowercase = 37 __lowercase = 'gelu' __lowercase = 0.1 __lowercase = 0.1 __lowercase = 5_12 __lowercase = 16 __lowercase = 2 __lowercase = 0.02 __lowercase = 3 __lowercase = 4 __lowercase = 1_28 __lowercase = 2 __lowercase = 9 __lowercase = 1 __lowercase = None def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase = ids_tensor([self.batch_size] , self.num_choices ) __lowercase = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = 
TFConvBertModel(config=_UpperCAmelCase ) __lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowercase = [input_ids, input_mask] __lowercase = model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str: """simple docstring""" __lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]: """simple docstring""" __lowercase = self.num_choices __lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowercase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" __lowercase = self.num_labels __lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase ) __lowercase = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowercase = model(_UpperCAmelCase ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ( __lowercase ) , ) = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase__ : List[str] = ( { "feature-extraction": TFConvBertModel, "fill-mask": TFConvBertForMaskedLM, "question-answering": TFConvBertForQuestionAnswering, "text-classification": TFConvBertForSequenceClassification, "token-classification": TFConvBertForTokenClassification, "zero-shot": TFConvBertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : int = False lowerCAmelCase__ : List[str] = False def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" __lowercase = TFConvBertModelTester(self ) __lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def a__ ( self : Any ) -> Dict: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def a__ ( self : int ) -> str: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def a__ ( self : Any ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = True if hasattr(_UpperCAmelCase , 'use_cache' ): __lowercase = True __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) for model_class in self.all_model_classes: __lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = model_class(_UpperCAmelCase ) __lowercase = len(model(_UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: 
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase ) __lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' ) __lowercase = tf.keras.models.load_model(_UpperCAmelCase ) __lowercase = model(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = outputs['encoder_hidden_states'] __lowercase = outputs['encoder_attentions'] else: __lowercase = outputs['hidden_states'] __lowercase = outputs['attentions'] self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) __lowercase = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def a__ ( self : List[str] ) -> Dict: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_UpperCAmelCase ) def a__ ( self : Tuple ) -> Tuple: """simple docstring""" __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = True __lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) __lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase ) def check_decoder_attentions_output(_UpperCAmelCase : int ): __lowercase = len(_UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) __lowercase = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ): __lowercase = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowercase = True __lowercase = False __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __lowercase = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always 
last and order is fine __lowercase = True __lowercase = True __lowercase = model_class(_UpperCAmelCase ) __lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @require_tf class A__ ( unittest.TestCase ): @slow def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = [1, 6, 7_68] self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
325
1
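# The trainer script above wires a compute_metrics callback that argmaxes the
# logits and returns mean accuracy. A standalone sketch of that callback with
# plain numpy arrays (the shapes below are illustrative):
import numpy as np


def compute_metrics(predictions: np.ndarray, label_ids: np.ndarray) -> dict:
    preds = np.argmax(predictions, axis=1)  # (batch, num_choices) -> (batch,)
    return {"acc": float((preds == label_ids).mean())}


logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = np.array([1, 0, 0])
print(compute_metrics(logits, labels))  # {'acc': 0.666...}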
from manim import * class A__ ( lowerCAmelCase__ ): def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = Rectangle(height=0.5 , width=0.5 ) __lowercase = Rectangle(height=0.25 , width=0.25 ) __lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __lowercase = [mem.copy() for i in range(6 )] __lowercase = [mem.copy() for i in range(6 )] __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = Text('CPU' , font_size=24 ) __lowercase = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) __lowercase = [mem.copy() for i in range(4 )] __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = Text('GPU' , font_size=24 ) __lowercase = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) __lowercase = [mem.copy() for i in range(6 )] __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = Text('Model' , font_size=24 ) __lowercase = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) __lowercase = [] __lowercase = [] __lowercase = [] for i, rect in enumerate(_UpperCAmelCase ): rect.set_stroke(_UpperCAmelCase ) __lowercase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=_UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=_UpperCAmelCase , buff=0.0 ) self.add(_UpperCAmelCase ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase , *_UpperCAmelCase ) __lowercase = [mem.copy() for i in range(6 )] __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = Text('Loaded Checkpoint' , font_size=24 ) __lowercase = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(_UpperCAmelCase ) __lowercase = [] __lowercase = [] for i, rect in enumerate(_UpperCAmelCase ): __lowercase = fill.copy().set_fill(_UpperCAmelCase , opacity=0.7 ) target.move_to(_UpperCAmelCase ) ckpt_arr.append(_UpperCAmelCase ) __lowercase = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) __lowercase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowercase = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) __lowercase = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) __lowercase = MarkupText( f"""Based on 
the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) __lowercase = [meta_mem.copy() for i in range(6 )] __lowercase = [meta_mem.copy() for i in range(6 )] __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) __lowercase = Text('Disk' , font_size=24 ) __lowercase = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , Write(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) ) __lowercase = [] for i, rect in enumerate(_UpperCAmelCase ): __lowercase = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) ) self.play(*_UpperCAmelCase ) self.play(FadeOut(_UpperCAmelCase ) ) __lowercase = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , *_UpperCAmelCase ) , ) self.wait()
325
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class A__ : def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" __lowercase = scheduler __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers] __lowercase = split_batches __lowercase = step_with_optimizer __lowercase = GradientState() def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __lowercase = AcceleratorState().num_processes for _ in range(_UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_last_lr() def a__ ( self : List[str] ) -> Tuple: """simple docstring""" return self.scheduler.state_dict() def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.scheduler.load_state_dict(_UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" return self.scheduler.get_lr() def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any: """simple docstring""" return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
325
1
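For reference, the scheduler wrapper in the row above implements accelerate's stepping rule: the learning-rate scheduler only advances after a real optimizer step, and when `split_batches` is off it advances once per process. A minimal sketch of that rule with plain PyTorch objects; the model, optimizer, and `num_processes = 2` below are assumed illustration values, not part of the row above:

import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import StepLR

model = torch.nn.Linear(4, 2)
opt = SGD(model.parameters(), lr=0.1)
sched = StepLR(opt, step_size=10, gamma=0.5)

num_processes = 2  # assumption: world size, as read from AcceleratorState in the code above
opt.step()         # the wrapper only steps the scheduler after a real optimizer step
for _ in range(num_processes):  # one scheduler step per process when split_batches is False
    sched.step()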
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


SCREAMING_SNAKE_CASE__ = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
import collections
import importlib.util
import os
import re
from pathlib import Path


SCREAMING_SNAKE_CASE__ = """src/transformers"""

# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
    if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
        return None
    __lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
    backends.sort()
    return "_and_".join(SCREAMING_SNAKE_CASE )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
    with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        __lowercase = f.readlines()
    __lowercase = 0
    while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(SCREAMING_SNAKE_CASE ):
        return None
    # First grab the objects without a specific backend in _import_structure
    __lowercase = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        __lowercase = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
            __lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
            __lowercase = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        __lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
        if single_line_import_search is not None:
            __lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
            objects.extend(SCREAMING_SNAKE_CASE )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    __lowercase = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        __lowercase = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __lowercase = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __lowercase = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                __lowercase = lines[line_index]
                if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
                    objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
                elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
                    __lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
                    __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE )
                elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
                    __lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
                    __lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
                    objects.extend(SCREAMING_SNAKE_CASE )
                elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
                    objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            __lowercase = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    __lowercase = []
    while (
        line_index < len(SCREAMING_SNAKE_CASE )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        __lowercase = lines[line_index]
        __lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    __lowercase = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(SCREAMING_SNAKE_CASE ):
        # If the line is an if is_backend_available, we grab all objects associated.
        __lowercase = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __lowercase = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __lowercase = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                __lowercase = lines[line_index]
                __lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            __lowercase = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
    def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
        return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    __lowercase = []
    for key in import_dict_objects.keys():
        __lowercase = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
        __lowercase = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            __lowercase = 'base imports' if key == 'none' else f"""{key} backend"""
            errors.append(f"""Differences for {name}:""" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"""  {a} in TYPE_HINT but not in _import_structure.""" )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"""  {a} in _import_structure but not in TYPE_HINT.""" )
    return errors


def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
    __lowercase = []
    for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
        if "__init__.py" in files:
            __lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
            __lowercase = parse_init(SCREAMING_SNAKE_CASE )
            if objects is not None:
                __lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
                if len(SCREAMING_SNAKE_CASE ) > 0:
                    __lowercase = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
    if len(SCREAMING_SNAKE_CASE ) > 0:
        raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )


def __SCREAMING_SNAKE_CASE ( ) -> Dict:
    __lowercase = []
    for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(SCREAMING_SNAKE_CASE )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
                continue
            __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
            __lowercase = short_path.replace(os.path.sep , '.' )
            submodules.append(SCREAMING_SNAKE_CASE )
        for fname in files:
            if fname == "__init__.py":
                continue
            __lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
            __lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(SCREAMING_SNAKE_CASE )
    return submodules


SCREAMING_SNAKE_CASE__ = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
]


def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
    # This is to make sure the transformers module imported is the one in the repo.
    __lowercase = importlib.util.spec_from_file_location(
        'transformers' ,
        os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) ,
        submodule_search_locations=[PATH_TO_TRANSFORMERS] ,
    )
    __lowercase = spec.loader.load_module()
    __lowercase = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(SCREAMING_SNAKE_CASE ) > 0:
        __lowercase = '\n'.join(f"""- {module}""" for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f"""{list_of_modules}\n"""
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.'
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
325
1
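A quick check of the backend-detection logic in the init-checker above. The two regexes are re-declared here under readable names, since the dump rebinds all module constants to SCREAMING_SNAKE_CASE__; the sample line is an assumed example of a transformers __init__.py guard:

import re

_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
_re_backend = re.compile(r"is\_([a-z_]*)_available()")

line = "    if not is_torch_available():"
if _re_test_backend.search(line):
    # findall returns (name, empty-group) tuples, hence b[0], as in the code above
    backends = sorted(b[0] for b in _re_backend.findall(line))
    print("_and_".join(backends))  # -> "torch"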
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class A__ ( nn.Module ):
        def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
            """simple docstring"""
            super().__init__()
            __lowercase = module
            __lowercase = nn.Sequential(
                nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) ,
                nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) ,
            )
            __lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
            nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
            nn.init.zeros_(self.adapter[1].weight )
            self.adapter.to(module.weight.device )

        def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
            """simple docstring"""
            return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    lowerCAmelCase__ : int = "bigscience/bloom-1b7"

    # Constant values
    lowerCAmelCase__ : Any = 2.109659552692574
    lowerCAmelCase__ : str = "Hello my name is"
    lowerCAmelCase__ : Any = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
    lowerCAmelCase__ : List[Any] = 10

    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase = AutoTokenizer.from_pretrained(self.model_name )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        super().setUp()
        # Models and tokenizer
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , torch_dtype=torch.floataa , device_map='auto' )
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )

    def a__ ( self : Any ) -> Optional[Any]:
        """simple docstring"""
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : str ) -> int:
        """simple docstring"""
        __lowercase = self.model_abit.config
        self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
        __lowercase = config.to_dict()
        __lowercase = config.to_diff_dict()
        __lowercase = config.to_json_string()

    def a__ ( self : Dict ) -> Tuple:
        """simple docstring"""
        from bitsandbytes.nn import Paramsabit

        __lowercase = self.model_fpaa.get_memory_footprint()
        __lowercase = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
        __lowercase = get_some_linear_layer(self.model_abit )
        self.assertTrue(linear.weight.__class__ == Paramsabit )

    def a__ ( self : Tuple ) -> str:
        """simple docstring"""
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(_UpperCAmelCase , torch.nn.Linear ):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta )

    def a__ ( self : List[str] ) -> str:
        """simple docstring"""
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        __lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )

    def a__ ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        __lowercase = BitsAndBytesConfig()
        __lowercase = True
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        __lowercase = model_abit_from_config.generate(
            input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )

    def a__ ( self : str ) -> List[str]:
        """simple docstring"""
        with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(_UpperCAmelCase )

    def a__ ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = BitsAndBytesConfig()
        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = AutoModelForCausalLM.from_pretrained(
                self.model_name ,
                quantization_config=_UpperCAmelCase ,
                load_in_abit=_UpperCAmelCase ,
                device_map='auto' ,
                bnb_abit_quant_type='nf4' ,
            )

    def a__ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with `str`
            self.model_abit.to('cpu' )
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa )
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.to(torch.device('cuda:0' ) )
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        __lowercase = self.model_fpaa.to(torch.floataa )
        __lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        # Check this does not throw an error
        __lowercase = self.model_fpaa.to('cpu' )
        # Check this does not throw an error
        __lowercase = self.model_fpaa.half()
        # Check this does not throw an error
        __lowercase = self.model_fpaa.float()

    def a__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    @classmethod
    def a__ ( cls : int ) -> Tuple:
        """simple docstring"""
        __lowercase = 't5-small'
        __lowercase = 'google/flan-t5-small'  # flan-t5 uses dense-act instead of dense-relu-dense
        __lowercase = AutoTokenizer.from_pretrained(cls.model_name )
        __lowercase = 'Translate in German: Hello, my dog is cute'

    def a__ ( self : List[Any] ) -> Dict:
        """simple docstring"""
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : int ) -> int:
        """simple docstring"""
        from transformers import TaForConditionalGeneration

        __lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
        __lowercase = None
        # test with `t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )
        # test with `flan-t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )
        __lowercase = modules

    def a__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )
        # test with `flan-t5-small`
        __lowercase = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
        __lowercase = model.generate(**_UpperCAmelCase )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        super().setUp()
        # model_name
        __lowercase = 'bigscience/bloom-560m'
        __lowercase = 't5-small'
        # Different types of model
        __lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # Sequence classification model
        __lowercase = AutoModelForSequenceClassification.from_pretrained(
            self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # CausalLM model
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
        # Seq2seq model
        __lowercase = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )

    def a__ ( self : int ) -> List[str]:
        """simple docstring"""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : Tuple ) -> str:
        """simple docstring"""
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : str ) -> str:
        """simple docstring"""
        super().setUp()

    def a__ ( self : Dict ) -> Any:
        """simple docstring"""
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : Tuple ) -> int:
        """simple docstring"""
        __lowercase = pipeline(
            'text-generation' ,
            model=self.model_name ,
            model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} ,
            max_new_tokens=self.MAX_NEW_TOKENS ,
        )
        # Real second forward pass
        __lowercase = self.pipe(self.input_text )
        self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )


@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
    def a__ ( self : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        super().setUp()

    def a__ ( self : List[Any] ) -> int:
        """simple docstring"""
        __lowercase = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        __lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
        # Second real batch
        __lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
        self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = 'facebook/opt-350m'
        super().setUp()

    def a__ ( self : Dict ) -> List[str]:
        """simple docstring"""
        if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
            return
        # Step 1: freeze all parameters
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            __lowercase = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                __lowercase = param.data.to(torch.floataa )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
                __lowercase = LoRALayer(module.q_proj , rank=16 )
                __lowercase = LoRALayer(module.k_proj , rank=16 )
                __lowercase = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        __lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            __lowercase = model.forward(**_UpperCAmelCase )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(_UpperCAmelCase , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Any = "gpt2-xl"
    lowerCAmelCase__ : str = 3.3191854854152187
325
import logging
import os

from .state import PartialState


class A__ ( logging.LoggerAdapter ):
    @staticmethod
    def a__ ( _UpperCAmelCase : str ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def a__ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : List[str] ) -> Optional[int]:
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        __lowercase = kwargs.pop('main_process_only' , _UpperCAmelCase )
        __lowercase = kwargs.pop('in_order' , _UpperCAmelCase )
        if self.isEnabledFor(_UpperCAmelCase ):
            if self._should_log(_UpperCAmelCase ):
                __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
            elif in_order:
                __lowercase = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        __lowercase , __lowercase = self.process(_UpperCAmelCase , _UpperCAmelCase )
                        self.logger.log(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
                    state.wait_for_everyone()


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ) -> Optional[Any]:
    if log_level is None:
        __lowercase = os.environ.get('ACCELERATE_LOG_LEVEL' , SCREAMING_SNAKE_CASE )
    __lowercase = logging.getLogger(SCREAMING_SNAKE_CASE )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
325
1
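The row above is accelerate's multi-process logging adapter. A short usage sketch against the public accelerate API (the messages are assumed examples; `main_process_only` and `in_order` are the kwargs consumed by the adapter's log override shown above):

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState, which the adapter requires
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, on the main process", main_process_only=True)
logger.info("logged by every rank, in launch order", main_process_only=False, in_order=True)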
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : int = "naver-clova-ix/donut-base-finetuned-docvqa"
    lowerCAmelCase__ : List[Any] = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    lowerCAmelCase__ : Union[str, Any] = "document_qa"
    lowerCAmelCase__ : Any = AutoProcessor
    lowerCAmelCase__ : Union[str, Any] = VisionEncoderDecoderModel
    lowerCAmelCase__ : Optional[Any] = ["image", "text"]
    lowerCAmelCase__ : List[Any] = ["text"]

    def __init__( self : Any , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )

    def a__ ( self : Any , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        __lowercase = task_prompt.replace('{user_input}' , _UpperCAmelCase )
        __lowercase = self.pre_processor.tokenizer(
            _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors='pt' ).input_ids
        __lowercase = self.pre_processor(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def a__ ( self : Dict , _UpperCAmelCase : Tuple ) -> List[Any]:
        """simple docstring"""
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) ,
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) ,
            max_length=self.model.decoder.config.max_position_embeddings ,
            early_stopping=_UpperCAmelCase ,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id ,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id ,
            use_cache=_UpperCAmelCase ,
            num_beams=1 ,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] ,
            return_dict_in_generate=_UpperCAmelCase ,
        ).sequences

    def a__ ( self : str , _UpperCAmelCase : List[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = self.pre_processor.batch_decode(_UpperCAmelCase )[0]
        __lowercase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        __lowercase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        __lowercase = re.sub(R'<.*?>' , '' , _UpperCAmelCase , count=1 ).strip()  # remove first task start token
        __lowercase = self.pre_processor.tokenajson(_UpperCAmelCase )
        return sequence["answer"]
325
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
    __lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    __lowercase = True if 'large' in model_name or 'huge' in model_name else False
    __lowercase = True if 'large' in model_name or 'huge' in model_name else False
    __lowercase = True if 'large' in model_name or 'huge' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            __lowercase = [3, 3, 3, 3]
            __lowercase = [5, 5, 5, 5]
        elif "fl4" in model_name:
            __lowercase = [4, 4, 4, 4]
            __lowercase = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        __lowercase = [3, 3, 3, 3]
        if "lrf" in model_name:
            __lowercase = [3, 3, 3, 3]
        else:
            __lowercase = [2, 2, 2, 2]

    if "tiny" in model_name:
        __lowercase = 96
    elif "small" in model_name:
        __lowercase = 96
    elif "base" in model_name:
        __lowercase = 128
    elif "large" in model_name:
        __lowercase = 192
    elif "xlarge" in model_name:
        __lowercase = 256
    elif "huge" in model_name:
        __lowercase = 352

    # set label information
    __lowercase = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        __lowercase = 'imagenet-22k-id2label.json'
    else:
        __lowercase = 'imagenet-1k-id2label.json'

    __lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    __lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    __lowercase = {v: k for k, v in idalabel.items()}

    __lowercase = FocalNetConfig(
        embed_dim=SCREAMING_SNAKE_CASE ,
        depths=SCREAMING_SNAKE_CASE ,
        focal_levels=SCREAMING_SNAKE_CASE ,
        focal_windows=SCREAMING_SNAKE_CASE ,
        use_conv_embed=SCREAMING_SNAKE_CASE ,
        idalabel=SCREAMING_SNAKE_CASE ,
        labelaid=SCREAMING_SNAKE_CASE ,
        use_post_layernorm=SCREAMING_SNAKE_CASE ,
        use_layerscale=SCREAMING_SNAKE_CASE ,
    )
    return config


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
    if "patch_embed.proj" in name:
        __lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        __lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        __lowercase = 'encoder.' + name
    if "encoder.layers" in name:
        __lowercase = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        __lowercase = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        __lowercase = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        __lowercase = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        __lowercase = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        __lowercase = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        __lowercase = 'layernorm.weight'
    if name == "norm.bias":
        __lowercase = 'layernorm.bias'
    if "head" in name:
        __lowercase = name.replace('head' , 'classifier' )
    else:
        __lowercase = 'focalnet.' + name
    return name


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]:
    # fmt: off
    __lowercase = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on

    __lowercase = model_name_to_url[model_name]
    print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE )
    __lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']

    # rename keys
    for key in state_dict.copy().keys():
        __lowercase = state_dict.pop(SCREAMING_SNAKE_CASE )
        __lowercase = val

    __lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE )
    __lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE )
    model.eval()

    # load state dict
    model.load_state_dict(SCREAMING_SNAKE_CASE )

    # verify conversion
    __lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    __lowercase = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE ,
        size={'shortest_edge': 256} ,
        resample=PILImageResampling.BILINEAR ,
        do_center_crop=SCREAMING_SNAKE_CASE ,
        crop_size=224 ,
        do_normalize=SCREAMING_SNAKE_CASE ,
        image_mean=SCREAMING_SNAKE_CASE ,
        image_std=SCREAMING_SNAKE_CASE ,
    )
    __lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
    __lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )

    __lowercase = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ]
    )
    __lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 )

    __lowercase = model(**SCREAMING_SNAKE_CASE )
    __lowercase = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx] )

    print('First values of logits:' , outputs.logits[0, :3] )

    if model_name == "focalnet-tiny":
        __lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
    elif model_name == "focalnet-tiny-lrf":
        __lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
    elif model_name == "focalnet-small":
        __lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
    elif model_name == "focalnet-small-lrf":
        __lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
    elif model_name == "focalnet-base":
        __lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
    elif model_name == "focalnet-base-lrf":
        __lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
    assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(SCREAMING_SNAKE_CASE )
        processor.save_pretrained(SCREAMING_SNAKE_CASE )

    if push_to_hub:
        print(f"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(f"""{model_name}""" )
        processor.push_to_hub(f"""{model_name}""" )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""focalnet-tiny""",
        type=str,
        help="""Name of the FocalNet model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub.""",
    )

    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
325
1
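To illustrate the key-renaming logic in the FocalNet conversion script above, here is a self-contained re-implementation of just two of its rules (the full function applies many more replacements; this trimmed version is only an illustration):

def rename_key(name: str) -> str:
    # rule from the script: timm-style patch embedding -> HF naming
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # rule from the script: classification head keeps its name, everything else gets the model prefix
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name

print(rename_key("patch_embed.proj.weight"))
# -> focalnet.embeddings.patch_embeddings.projection.weight
print(rename_key("head.bias"))  # -> classifier.bias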
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Tuple = "encodec"

    def __init__( self : Dict , _UpperCAmelCase : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _UpperCAmelCase : Any=2_40_00 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : List[Any]=[8, 5, 4, 2] , _UpperCAmelCase : Union[str, Any]="weight_norm" , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]="reflect" , _UpperCAmelCase : Any=2 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[Any]=10_24 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=True , **_UpperCAmelCase : str , ) -> str:
        """simple docstring"""
        __lowercase = target_bandwidths
        __lowercase = sampling_rate
        __lowercase = audio_channels
        __lowercase = normalize
        __lowercase = chunk_length_s
        __lowercase = overlap
        __lowercase = hidden_size
        __lowercase = num_filters
        __lowercase = num_residual_layers
        __lowercase = upsampling_ratios
        __lowercase = norm_type
        __lowercase = kernel_size
        __lowercase = last_kernel_size
        __lowercase = residual_kernel_size
        __lowercase = dilation_growth_rate
        __lowercase = use_causal_conv
        __lowercase = pad_mode
        __lowercase = compress
        __lowercase = num_lstm_layers
        __lowercase = trim_right_ratio
        __lowercase = codebook_size
        __lowercase = codebook_dim if codebook_dim is not None else hidden_size
        __lowercase = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )

        super().__init__(**_UpperCAmelCase )

    @property
    def a__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )

    @property
    def a__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )

    @property
    def a__ ( self : Tuple ) -> int:
        """simple docstring"""
        __lowercase = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )

    @property
    def a__ ( self : int ) -> int:
        """simple docstring"""
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
325
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


SCREAMING_SNAKE_CASE__ = {
    """facebook/mask2former-swin-small-coco-instance""": (
        """https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Tuple = "mask2former"
    lowerCAmelCase__ : List[Any] = ["swin"]
    lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"}

    def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
        """simple docstring"""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
            __lowercase = CONFIG_MAPPING['swin'](
                image_size=2_24 ,
                in_channels=3 ,
                patch_size=4 ,
                embed_dim=96 ,
                depths=[2, 2, 18, 2] ,
                num_heads=[3, 6, 12, 24] ,
                window_size=7 ,
                drop_path_rate=0.3 ,
                use_absolute_embeddings=_UpperCAmelCase ,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,
            )

        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            __lowercase = backbone_config.pop('model_type' )
            __lowercase = CONFIG_MAPPING[backbone_model_type]
            __lowercase = config_class.from_dict(_UpperCAmelCase )

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {",".join(self.backbones_supported )}"""
            )

        __lowercase = backbone_config
        __lowercase = feature_size
        __lowercase = mask_feature_size
        __lowercase = hidden_dim
        __lowercase = encoder_feedforward_dim
        __lowercase = activation_function
        __lowercase = encoder_layers
        __lowercase = decoder_layers
        __lowercase = num_attention_heads
        __lowercase = dropout
        __lowercase = dim_feedforward
        __lowercase = pre_norm
        __lowercase = enforce_input_projection
        __lowercase = common_stride
        __lowercase = ignore_value
        __lowercase = num_queries
        __lowercase = no_object_weight
        __lowercase = class_weight
        __lowercase = mask_weight
        __lowercase = dice_weight
        __lowercase = train_num_points
        __lowercase = oversample_ratio
        __lowercase = importance_sample_ratio
        __lowercase = init_std
        __lowercase = init_xavier_std
        __lowercase = use_auxiliary_loss
        __lowercase = feature_strides
        __lowercase = output_auxiliary_logits
        __lowercase = decoder_layers

        super().__init__(**_UpperCAmelCase )

    @classmethod
    def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict:
        """simple docstring"""
        return cls(
            backbone_config=_UpperCAmelCase ,
            **_UpperCAmelCase ,
        )

    def a__ ( self : str ) -> Dict[str, any]:
        """simple docstring"""
        __lowercase = copy.deepcopy(self.__dict__ )
        __lowercase = self.backbone_config.to_dict()
        __lowercase = self.__class__.model_type
        return output
325
1
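A worked example of the derived properties in the Encodec config two rows above, recomputed from its 24 kHz defaults (sampling_rate=24000, upsampling_ratios=[8, 5, 4, 2], target_bandwidths up to 24 kbps); `math.prod` stands in for the config's `np.prod`:

import math

sampling_rate = 24000
upsampling_ratios = [8, 5, 4, 2]

hop_length = math.prod(upsampling_ratios)            # 320 samples per codec frame
frame_rate = math.ceil(sampling_rate / hop_length)   # 75 frames per second

target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]
num_quantizers = int(1000 * target_bandwidths[-1] // (frame_rate * 10))  # 32
print(hop_length, frame_rate, num_quantizers)  # 320 75 32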
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


SCREAMING_SNAKE_CASE__ = """sshleifer/mar_enro_6_3_student"""


class A__ ( lowerCAmelCase__ ):
    def a__ ( self : List[Any] ) -> int:
        """simple docstring"""
        super().setUp()
        __lowercase = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,
            extract_compressed_file=_UpperCAmelCase ,
        )
        __lowercase = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""

    @slow
    @require_torch_gpu
    def a__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        MarianMTModel.from_pretrained(_UpperCAmelCase )

    @slow
    @require_torch_gpu
    def a__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        __lowercase = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }
        # Clean up bash script
        __lowercase = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
        __lowercase = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
        for k, v in env_vars_to_replace.items():
            __lowercase = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
        __lowercase = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        __lowercase = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        __lowercase = ['finetune.py'] + bash_script.split() + args
        with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
            __lowercase = argparse.ArgumentParser()
            __lowercase = pl.Trainer.add_argparse_args(_UpperCAmelCase )
            __lowercase = SummarizationModule.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
            __lowercase = parser.parse_args()
            __lowercase = main(_UpperCAmelCase )

        # Check metrics
        __lowercase = load_json(model.metrics_save_path )
        __lowercase = metrics['val'][0]
        __lowercase = metrics['val'][-1]
        self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _UpperCAmelCase )
        self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )

        # check lightning ckpt can be loaded and has a reasonable statedict
        __lowercase = os.listdir(_UpperCAmelCase )
        __lowercase = [x for x in contents if x.endswith('.ckpt' )][0]
        __lowercase = os.path.join(args.output_dir , _UpperCAmelCase )
        __lowercase = torch.load(_UpperCAmelCase , map_location='cpu' )
        __lowercase = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            __lowercase = {os.path.basename(_UpperCAmelCase ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test'] ) == 1


class A__ ( lowerCAmelCase__ ):
    @timeout_decorator.timeout(6_00 )
    @slow
    @require_torch_gpu
    def a__ ( self : List[str] ) -> Dict:
        """simple docstring"""
        __lowercase = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
        __lowercase = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 1_28,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }
        # Clean up bash script
        __lowercase = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
        )
        __lowercase = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
        __lowercase = bash_script.replace('--fp16 ' , ' ' )
        for k, v in env_vars_to_replace.items():
            __lowercase = bash_script.replace(_UpperCAmelCase , str(_UpperCAmelCase ) )
        __lowercase = self.get_auto_remove_tmp_dir()
        __lowercase = bash_script.replace('--fp16' , '' )
        __lowercase = 6
        __lowercase = (
            ['distillation.py']
            + bash_script.split()
            + [
                f"""--output_dir={output_dir}""",
                '--gpus=1',
                '--learning_rate=1e-3',
                f"""--num_train_epochs={epochs}""",
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ):
            __lowercase = argparse.ArgumentParser()
            __lowercase = pl.Trainer.add_argparse_args(_UpperCAmelCase )
            __lowercase = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
            __lowercase = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            __lowercase = distill_main(_UpperCAmelCase )

        # Check metrics
        __lowercase = load_json(model.metrics_save_path )
        __lowercase = metrics['val'][0]
        __lowercase = metrics['val'][-1]
        assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , _UpperCAmelCase )

        # check lightning ckpt can be loaded and has a reasonable statedict
        __lowercase = os.listdir(_UpperCAmelCase )
        __lowercase = [x for x in contents if x.endswith('.ckpt' )][0]
        __lowercase = os.path.join(args.output_dir , _UpperCAmelCase )
        __lowercase = torch.load(_UpperCAmelCase , map_location='cpu' )
        __lowercase = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            __lowercase = {os.path.basename(_UpperCAmelCase ) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test'] ) == 1
325
import argparse
import os

import transformers

from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging


logging.set_verbosity_info()

SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )

    if tokenizer_name is None:
        __lowercase = TOKENIZER_CLASSES
    else:
        __lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}

    logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )

    for tokenizer_name in tokenizer_names:
        __lowercase = TOKENIZER_CLASSES[tokenizer_name]

        __lowercase = True
        if checkpoint_name is None:
            __lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            __lowercase = [checkpoint_name]

        logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )

        for checkpoint in checkpoint_names:
            logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )

            # Load tokenizer
            __lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )

            # Save fast tokenizer
            logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )

            # For organization names we create sub-directories
            if "/" in checkpoint:
                __lowercase , __lowercase = checkpoint.split('/' )
                __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            elif add_prefix:
                __lowercase = checkpoint
                __lowercase = dump_path
            else:
                __lowercase = None
                __lowercase = dump_path

            logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                __lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                __lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
                if next_char == "/":
                    __lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    __lowercase = None
                logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )

            __lowercase = tokenizer.save_pretrained(
                SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
            logger.info(f"""=> File names {file_names}""" )

            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(SCREAMING_SNAKE_CASE )
                    logger.info(f"""=> removing {file_name}""" )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help=(
            F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
            """download and convert all the checkpoints from AWS."""
        ),
    )
    parser.add_argument(
        """--checkpoint_name""",
        default=None,
        type=str,
        help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
    )
    parser.add_argument(
        """--force_download""",
        action="""store_true""",
        help="""Re-download checkpoints.""",
    )
    SCREAMING_SNAKE_CASE__ = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
325
1
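A hypothetical invocation of the tokenizer-conversion script above; the file name and checkpoint value are assumptions (the argument flags are taken from the argparse definitions in the dump):

import subprocess

subprocess.run([
    "python", "convert_slow_tokenizers_checkpoints_to_fast.py",  # assumed script file name
    "--tokenizer_name", "BertTokenizer",
    "--checkpoint_name", "bert-base-uncased",  # assumed example checkpoint
    "--dump_path", "./fast-tokenizers",
])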
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : List[str] = "vit_msn"

    def __init__( self : int , _UpperCAmelCase : Optional[int]=7_68 , _UpperCAmelCase : str=12 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : str=30_72 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[str]=1e-0_6 , _UpperCAmelCase : Union[str, Any]=2_24 , _UpperCAmelCase : int=16 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Union[str, Any]=True , **_UpperCAmelCase : Any , ) -> Dict:
        """simple docstring"""
        super().__init__(**_UpperCAmelCase )

        __lowercase = hidden_size
        __lowercase = num_hidden_layers
        __lowercase = num_attention_heads
        __lowercase = intermediate_size
        __lowercase = hidden_act
        __lowercase = hidden_dropout_prob
        __lowercase = attention_probs_dropout_prob
        __lowercase = initializer_range
        __lowercase = layer_norm_eps
        __lowercase = image_size
        __lowercase = patch_size
        __lowercase = num_channels
        __lowercase = qkv_bias
325
from math import isqrt, loga def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]: __lowercase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __lowercase = False return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]] def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800800 , SCREAMING_SNAKE_CASE : int = 800800 ) -> int: __lowercase = degree * loga(SCREAMING_SNAKE_CASE ) __lowercase = int(SCREAMING_SNAKE_CASE ) __lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE ) __lowercase = 0 __lowercase = 0 __lowercase = len(SCREAMING_SNAKE_CASE ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F'''{solution() = }''')
325
1
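A brute-force cross-check, at a toy bound, of the two-pointer count above: hybrid integers are p**q * q**p for distinct primes p < q, and the log2 comparison below is exactly the solution's test (its strict ">" shrinks the right pointer, so pairs with "<=" are counted). The small prime list and bound are stand-ins for calculate_prime_numbers(...) and 800800 * log2(800800):

from math import log2

primes = [2, 3, 5, 7, 11, 13]        # toy stand-in for calculate_prime_numbers(...)
upper_bound = 2 * log2(800_800)      # toy stand-in for the real bound
count = sum(
    1
    for i, p in enumerate(primes)
    for q in primes[i + 1 :]
    if q * log2(p) + p * log2(q) <= upper_bound  # log2(p**q * q**p) <= log2(bound)
)
print(count)  # the two-pointer routine returns the same total at this bound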
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Tuple = "" lowerCAmelCase__ : List[str] = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[DatasetInfo] = None , _UpperCAmelCase : Optional[str] = None , **_UpperCAmelCase : int , ) -> List[Any]: """simple docstring""" super().__init__(self , **_UpperCAmelCase ) __lowercase = repo_info __lowercase = token __lowercase = None def a__ ( self : int ) -> int: """simple docstring""" if self.dir_cache is None: __lowercase = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes __lowercase = { 'name': hf_file.rfilename, 'size': None, 'type': 'file', } self.dir_cache.update( { str(_UpperCAmelCase ): {'name': str(_UpperCAmelCase ), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : str = "rb" , **_UpperCAmelCase : Any , ) -> Tuple: """simple docstring""" if not isinstance(self.repo_info , _UpperCAmelCase ): raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) __lowercase = hf_hub_url(self.repo_info.id , _UpperCAmelCase , revision=self.repo_info.sha ) return fsspec.open( _UpperCAmelCase , mode=_UpperCAmelCase , headers=get_authentication_headers_for_url(_UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open() def a__ ( self : Optional[int] , _UpperCAmelCase : Dict , **_UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" self._get_dirs() __lowercase = self._strip_protocol(_UpperCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(_UpperCAmelCase ) def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str=False , **_UpperCAmelCase : Optional[Any] ) -> int: """simple docstring""" self._get_dirs() __lowercase = PurePosixPath(path.strip('/' ) ) __lowercase = {} for p, f in self.dir_cache.items(): __lowercase = PurePosixPath(p.strip('/' ) ) __lowercase = p.parent if root == path: __lowercase = f __lowercase = list(paths.values() ) if detail: return out else: return sorted(f['name'] for f in out )
325
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp SCREAMING_SNAKE_CASE__ = 5 SCREAMING_SNAKE_CASE__ = 10 @require_sentencepiece @require_tokenizers class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[Any] = True def a__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" super().setUp() __lowercase = sp.SentencePieceProcessor() spm_model.Load(_UpperCAmelCase ) __lowercase = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )] __lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase = Path(self.tmpdirname ) save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self : str ) -> int: """simple docstring""" __lowercase = '<pad>' __lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(_UpperCAmelCase ) , 10_01 ) def a__ ( self : int ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_01 ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) __lowercase = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , ) __lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) __lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class A__ ( unittest.TestCase ): lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium" lowerCAmelCase__ : Dict = "C'est trop cool" lowerCAmelCase__ : List[Any] = "Esto es genial" @classmethod def a__ ( cls : Any ) -> Optional[int]: """simple docstring""" __lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def a__ ( self : Tuple ) -> Tuple: """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 1_00_00 ) def a__ ( self : str ) -> int: """simple docstring""" self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids ) __lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2] __lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) __lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase ) def a__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __lowercase = 'fr' __lowercase = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , _UpperCAmelCase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def a__ ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) __lowercase = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
325
1
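The directory entries synthesized by the _get_dirs method above come purely from PurePosixPath.parents; a self-contained sketch with made-up file names:

from pathlib import PurePosixPath

filenames = ["data/train/part-0.parquet", "data/valid/part-0.parquet", "README.md"]
dir_cache = {}
for rfilename in filenames:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    # list(...parents)[:-1] drops the root ".", exactly as in the class above
    for d in list(PurePosixPath(rfilename).parents)[:-1]:
        dir_cache[str(d)] = {"name": str(d), "size": None, "type": "directory"}
print(sorted(dir_cache))
# ['README.md', 'data', 'data/train', 'data/train/part-0.parquet', 'data/valid', 'data/valid/part-0.parquet']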
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[Any] ) -> List[Any]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : int ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_torch def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [torch.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(_UpperCAmelCase ): __lowercase = 
processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) ) @require_vision @require_tf class A__ ( unittest.TestCase ): def a__ ( self : Optional[Any] ) -> Any: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __lowercase = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def a__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' ) __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) @require_tf def a__ ( self : Dict ) -> List[Any]: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = [tf.ones((1, 3, 5, 5) )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = processor.post_process_masks( _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) # should also work with np __lowercase = [np.ones((1, 3, 5, 5) )] __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) ) __lowercase = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): __lowercase = processor.post_process_masks( _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' ) @require_vision @require_torchvision class A__ ( unittest.TestCase ): def a__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = tempfile.mkdtemp() __lowercase = 
SamImageProcessor() __lowercase = SamProcessor(_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def a__ ( self : List[str] ) -> int: """simple docstring""" __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def a__ ( self : Tuple ) -> str: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )] __lowercase = [torch.tensor(_UpperCAmelCase )] __lowercase = [[17_64, 26_46]] __lowercase = [[6_83, 10_24]] __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' ) __lowercase = processor.post_process_masks( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __lowercase = self.get_image_processor() __lowercase = SamProcessor(image_processor=_UpperCAmelCase ) __lowercase = self.prepare_image_inputs() __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy() __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""", } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : List[Any] = "layoutlmv3" def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict: """simple docstring""" super().__init__( vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) __lowercase = max_ad_position_embeddings __lowercase = coordinate_size __lowercase = shape_size __lowercase = has_relative_attention_bias __lowercase = rel_pos_bins __lowercase = max_rel_pos __lowercase = has_spatial_attention_bias __lowercase = rel_ad_pos_bins __lowercase = max_rel_ad_pos __lowercase = text_embed __lowercase = visual_embed __lowercase = input_size __lowercase = num_channels __lowercase = patch_size __lowercase = classifier_dropout class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : int = version.parse("1.12" ) @property def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def a__ ( self : int ) -> float: """simple docstring""" 
return 1e-5 @property def a__ ( self : str ) -> int: """simple docstring""" return 12 def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]: """simple docstring""" setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase ) __lowercase = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes __lowercase = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) __lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __lowercase = dict( processor( _UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) ) return inputs
325
1
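The ONNX dummy-input path above leans on compute_effective_axis_dimension; a minimal restatement of its rule follows. The defaults of 2 and 8 match the OnnxConfig.default_fixed_batch / default_fixed_sequence values referenced in the comments, but treat the exact numbers as an assumption:

def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # A dynamic ONNX axis (-1) is replaced by a small fixed size so the
    # exporter cannot specialize on it; special tokens are then budgeted out.
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add

assert compute_effective_axis_dimension(-1, fixed_dimension=2) == 2                       # batch axis
assert compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6   # sequence axis
assert compute_effective_axis_dimension(16, fixed_dimension=2) == 16                      # static axis kept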
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool: if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): raise ValueError('check_bouncy() accepts only integer arguments' ) __lowercase = str(SCREAMING_SNAKE_CASE ) __lowercase = ''.join(sorted(SCREAMING_SNAKE_CASE ) ) return sorted_str_n != str_n and sorted_str_n[::-1] != str_n def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : float = 99 ) -> int: if not 0 < percent < 100: raise ValueError('solution() only accepts values from 0 to 100' ) __lowercase = 0 __lowercase = 1 while True: if check_bouncy(SCREAMING_SNAKE_CASE ): bouncy_num += 1 if (bouncy_num / num) * 100 >= percent: return num num += 1 if __name__ == "__main__": from doctest import testmod testmod() print(F'''{solution(99)}''')
325
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # General docstring SCREAMING_SNAKE_CASE__ = """RegNetConfig""" # Base docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7] # Image classification docstring SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040""" SCREAMING_SNAKE_CASE__ = """tabby, tabby cat""" SCREAMING_SNAKE_CASE__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A__ ( nn.Module ): def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = nn.Convad( _UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) __lowercase = ACTaFN[activation] if activation is not None else nn.Identity() def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any: """simple docstring""" super().__init__() __lowercase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) __lowercase = config.num_channels def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]: """simple docstring""" __lowercase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' 
) __lowercase = self.embedder(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]: """simple docstring""" super().__init__() __lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase ) __lowercase = nn.BatchNormad(_UpperCAmelCase ) def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor: """simple docstring""" __lowercase = self.convolution(_UpperCAmelCase ) __lowercase = self.normalization(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: """simple docstring""" super().__init__() __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) __lowercase = nn.Sequential( nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , ) def a__ ( self : str , _UpperCAmelCase : Dict ) -> str: """simple docstring""" __lowercase = self.pooler(_UpperCAmelCase ) __lowercase = self.attention(_UpperCAmelCase ) __lowercase = hidden_state * attention return hidden_state class A__ ( nn.Module ): def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]: """simple docstring""" __lowercase = hidden_state __lowercase = self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = in_channels != out_channels or stride != 1 __lowercase = max(1 , out_channels // config.groups_width ) __lowercase = ( RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) __lowercase = nn.Sequential( RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , ) __lowercase = ACTaFN[config.hidden_act] def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = hidden_state __lowercase = 
self.layer(_UpperCAmelCase ) __lowercase = self.shortcut(_UpperCAmelCase ) hidden_state += residual __lowercase = self.activation(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict: """simple docstring""" super().__init__() __lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __lowercase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , ) def a__ ( self : Any , _UpperCAmelCase : str ) -> int: """simple docstring""" __lowercase = self.layers(_UpperCAmelCase ) return hidden_state class A__ ( nn.Module ): def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int: """simple docstring""" super().__init__() __lowercase = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) __lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) ) def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention: """simple docstring""" __lowercase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase = hidden_states + (hidden_state,) __lowercase = stage_module(_UpperCAmelCase ) if output_hidden_states: __lowercase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase ) class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : Optional[Any] = RegNetConfig lowerCAmelCase__ : Optional[int] = "regnet" lowerCAmelCase__ : Dict = "pixel_values" lowerCAmelCase__ : List[str] = True def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' ) elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict: """simple docstring""" if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = value SCREAMING_SNAKE_CASE__ = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SCREAMING_SNAKE_CASE__ = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class A__ ( lowerCAmelCase__ ): def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config __lowercase = RegNetEmbeddings(_UpperCAmelCase ) __lowercase = RegNetEncoder(_UpperCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" __lowercase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.embedder(_UpperCAmelCase ) __lowercase = self.encoder( _UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = encoder_outputs[0] __lowercase = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , lowerCAmelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class A__ ( lowerCAmelCase__ ): def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase = config.num_labels __lowercase = RegNetModel(_UpperCAmelCase ) # classification head __lowercase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" __lowercase = return_dict if return_dict is not None else self.config.use_return_dict __lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase ) __lowercase = outputs.pooler_output if return_dict else outputs[1] __lowercase = self.classifier(_UpperCAmelCase ) __lowercase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase = 'single_label_classification' else: __lowercase = 'multi_label_classification' if self.config.problem_type == "regression": __lowercase = MSELoss() if self.num_labels == 1: __lowercase = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) elif self.config.problem_type == "single_label_classification": __lowercase = CrossEntropyLoss() __lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase = BCEWithLogitsLoss() __lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase ) if not return_dict: __lowercase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
325
1
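Quick checks for the bouncy-number predicate above, plus the Project Euler 112 landmarks that solution() reproduces:

def is_bouncy(n: int) -> bool:
    s = str(n)
    # bouncy: digits neither non-decreasing nor non-increasing
    return "".join(sorted(s)) != s and "".join(sorted(s, reverse=True)) != s

assert not is_bouncy(1234)    # increasing digits
assert not is_bouncy(66420)   # decreasing digits
assert is_bouncy(155349)      # neither -> bouncy
# Per Project Euler 112, the bouncy proportion first reaches 50% at 538
# and 99% at 1587000 (the latter is what solution(99) above returns).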
from __future__ import annotations import requests SCREAMING_SNAKE_CASE__ = set( """approved_at_utc approved_by author_flair_background_color author_flair_css_class author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post category clicked content_categories created_utc downs edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title name permalink pwls quarantine saved score secure_media secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split() ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : str = "new" , SCREAMING_SNAKE_CASE : list | None = None ) -> dict: __lowercase = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(SCREAMING_SNAKE_CASE ) - valid_terms ) ): __lowercase = F"""Invalid search term: {invalid_search_terms}""" raise ValueError(SCREAMING_SNAKE_CASE ) __lowercase = requests.get( F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'User-agent': 'A random string'} , ) if response.status_code == 429: raise requests.HTTPError __lowercase = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(SCREAMING_SNAKE_CASE )} __lowercase = {} for id_ in range(SCREAMING_SNAKE_CASE ): __lowercase = { item: data['data']['children'][id_]['data'][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited. Try again after some time. print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
325
from __future__ import annotations def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : list[list[int]] ) -> int: # preprocessing the first row for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
325
1
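A worked instance of the in-place min-path routine above, restated with readable names; the classic 3x3 grid below has an optimal right/down path of cost 7:

def min_path_cost(matrix: list[list[int]]) -> int:
    for i in range(1, len(matrix[0])):   # prefix the first row
        matrix[0][i] += matrix[0][i - 1]
    for i in range(1, len(matrix)):      # prefix the first column
        matrix[i][0] += matrix[i - 1][0]
    for i in range(1, len(matrix)):      # each cell adds the cheaper of top / left
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]

assert min_path_cost([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7  # path 1-3-1-1-1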
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : str = "mobilenet_v1" def __init__( self : str , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : List[str]=2_24 , _UpperCAmelCase : int=1.0 , _UpperCAmelCase : Optional[Any]=8 , _UpperCAmelCase : List[str]="relu6" , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Any=0.999 , _UpperCAmelCase : Optional[Any]=0.02 , _UpperCAmelCase : Any=0.001 , **_UpperCAmelCase : Optional[int] , ) -> Any: """simple docstring""" super().__init__(**_UpperCAmelCase ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) __lowercase = num_channels __lowercase = image_size __lowercase = depth_multiplier __lowercase = min_depth __lowercase = hidden_act __lowercase = tf_padding __lowercase = classifier_dropout_prob __lowercase = initializer_range __lowercase = layer_norm_eps class A__ ( lowerCAmelCase__ ): lowerCAmelCase__ : str = version.parse("1.11" ) @property def a__ ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def a__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def a__ ( self : str ) -> float: """simple docstring""" return 1e-4
325
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger SCREAMING_SNAKE_CASE__ = get_logger(__name__) class A__ ( enum.Enum ): lowerCAmelCase__ : Dict = "all_checks" lowerCAmelCase__ : List[Any] = "basic_checks" lowerCAmelCase__ : Dict = "no_checks" class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]: if expected_checksums is None: logger.info('Unable to verify checksums.' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __lowercase = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __lowercase = ' for ' + verification_name if verification_name is not None else '' if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingChecksumError( F"""Checksums didn't match{for_verification_name}:\n""" F"""{bad_urls}\n""" 'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' ) logger.info('All the checksums matched successfully' + for_verification_name ) class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass class A__ ( lowerCAmelCase__ ): pass def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[dict] , SCREAMING_SNAKE_CASE : dict ) -> Optional[int]: if expected_splits is None: logger.info('Unable to verify splits sizes.' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __lowercase = [ {'expected': expected_splits[name], 'recorded': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) ) logger.info('All the splits matched successfully.' ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = True ) -> dict: if record_checksum: __lowercase = shaaaa() with open(SCREAMING_SNAKE_CASE , 'rb' ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , b'' ): m.update(SCREAMING_SNAKE_CASE ) __lowercase = m.hexdigest() else: __lowercase = None return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum} def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict: if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
325
1
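A self-contained sketch of the size/checksum record built above (shaaaa is assumed to be hashlib.sha256 under the de-obfuscation, which matches the datasets library; the chunked reads mirror the 1 << 20 loop):

import os
import tempfile
from hashlib import sha256

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello world" * 1000)
    path = f.name

m = sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        m.update(chunk)
record = {"num_bytes": os.path.getsize(path), "checksum": m.hexdigest()}
print(record)  # e.g. {'num_bytes': 11000, 'checksum': '...'}
os.remove(path)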
import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class A__ ( unittest.TestCase ): def __init__( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : Any=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]=99 , _UpperCAmelCase : str=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Tuple=5_12 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : List[Any]=4 , ) -> Tuple: """simple docstring""" __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_attention_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_choices def a__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase = None if self.use_attention_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a__ ( self : int ) -> List[Any]: """simple docstring""" __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class A__ ( lowerCAmelCase__ , unittest.TestCase ): lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Tuple = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def a__ ( self : Dict ) -> 
List[Any]: """simple docstring""" __lowercase = FlaxRoFormerModelTester(self ) @slow def a__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" for model_class_name in self.all_model_classes: __lowercase = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=_UpperCAmelCase ) __lowercase = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class A__ ( unittest.TestCase ): @slow def a__ ( self : str ) -> List[str]: """simple docstring""" __lowercase = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) __lowercase = jnp.array([[0, 1, 2, 3, 4, 5]] ) __lowercase = model(_UpperCAmelCase )[0] __lowercase = 5_00_00 __lowercase = (1, 6, vocab_size) self.assertEqual(output.shape , _UpperCAmelCase ) __lowercase = jnp.array( [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
325
import math def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> bool: assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False __lowercase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=1 , **SCREAMING_SNAKE_CASE : Tuple ) -> Dict: __lowercase = factor * value __lowercase = value while not is_prime(SCREAMING_SNAKE_CASE ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **SCREAMING_SNAKE_CASE ) return value
325
1
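Sanity checks for the trial-division primality test above, restated with callable names (the obfuscated duplicate parameter names prevent running the original directly):

import math

def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"
    if 1 < number < 4:  # 2 and 3 are primes
        return True
    if number < 2 or number % 2 == 0:  # negatives, 0, 1 and even numbers
        return False
    # only odd divisors up to sqrt(number) need checking
    return not any(number % i == 0 for i in range(3, int(math.sqrt(number)) + 1, 2))

assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert not is_prime(561)  # Carmichael number, but composite (3 * 11 * 17)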
import os import pytest from transformers.dynamic_module_utils import get_imports SCREAMING_SNAKE_CASE__ = """ import os """ SCREAMING_SNAKE_CASE__ = """ def foo(): import os return False """ SCREAMING_SNAKE_CASE__ = """ def foo(): def bar(): if True: import os return False return bar() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar except ImportError as e: raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar except: raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar import baz except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE__ = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ SCREAMING_SNAKE_CASE__ = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('case' , SCREAMING_SNAKE_CASE ) def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ) -> List[str]: __lowercase = os.path.join(SCREAMING_SNAKE_CASE , 'test_file.py' ) with open(SCREAMING_SNAKE_CASE , 'w' ) as _tmp_file: _tmp_file.write(SCREAMING_SNAKE_CASE ) __lowercase = get_imports(SCREAMING_SNAKE_CASE ) assert parsed_imports == ["os"]
325
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
    def a__ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        __lowercase = tempfile.mkdtemp()
        __lowercase = SamImageProcessor()
        __lowercase = SamProcessor(_UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor

    def a__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def a__ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def a__ ( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
        __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _UpperCAmelCase )

    def a__ ( self : int ) -> Tuple:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )

        __lowercase = self.prepare_image_inputs()

        __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
        __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )

        input_feat_extract.pop('original_sizes' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    @require_torch
    def a__ ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )
        __lowercase = [torch.ones((1, 3, 5, 5) )]
        __lowercase = [[17_64, 26_46]]
        __lowercase = [[6_83, 10_24]]
        __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        __lowercase = processor.post_process_masks(
            _UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        # should also work with np
        __lowercase = [np.ones((1, 3, 5, 5) )]
        __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        __lowercase = [[1, 0], [0, 1]]
        with self.assertRaises(_UpperCAmelCase ):
            __lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )


@require_vision
@require_tf
class A__ ( unittest.TestCase ):
    def a__ ( self : Optional[Any] ) -> Any:
        """simple docstring"""
        __lowercase = tempfile.mkdtemp()
        __lowercase = SamImageProcessor()
        __lowercase = SamProcessor(_UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor

    def a__ ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def a__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def a__ ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        __lowercase = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        __lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
        __lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _UpperCAmelCase )

    def a__ ( self : Optional[Any] ) -> List[str]:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )

        __lowercase = self.prepare_image_inputs()

        __lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
        __lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )

        input_feat_extract.pop('original_sizes' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    @require_tf
    def a__ ( self : Dict ) -> List[Any]:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )
        __lowercase = [tf.ones((1, 3, 5, 5) )]
        __lowercase = [[17_64, 26_46]]
        __lowercase = [[6_83, 10_24]]
        __lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        __lowercase = processor.post_process_masks(
            _UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        # should also work with np
        __lowercase = [np.ones((1, 3, 5, 5) )]
        __lowercase = processor.post_process_masks(
            _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
        self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )

        __lowercase = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __lowercase = processor.post_process_masks(
                _UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )


@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
    def a__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        __lowercase = tempfile.mkdtemp()
        __lowercase = SamImageProcessor()
        __lowercase = SamProcessor(_UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )

    def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor

    def a__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def a__ ( self : List[str] ) -> int:
        """simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def a__ ( self : Tuple ) -> str:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )

        __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
        __lowercase = [torch.tensor(_UpperCAmelCase )]
        __lowercase = [[17_64, 26_46]]
        __lowercase = [[6_83, 10_24]]
        __lowercase = processor.post_process_masks(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
        __lowercase = processor.post_process_masks(
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def a__ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        __lowercase = self.get_image_processor()
        __lowercase = SamProcessor(image_processor=_UpperCAmelCase )

        __lowercase = self.prepare_image_inputs()

        __lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
        __lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()

        __lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
        __lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()

        self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
        self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
        self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
325
1
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxCrossAttnUpBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
    FlaxUpBlockaD,
)


@flax.struct.dataclass
class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : jnp.ndarray


@flax_register_to_config
class A__ ( nn.Module , lowerCAmelCase__ , lowerCAmelCase__ ):
    lowerCAmelCase__ : int = 32
    lowerCAmelCase__ : int = 4
    lowerCAmelCase__ : int = 4
    lowerCAmelCase__ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    lowerCAmelCase__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    lowerCAmelCase__ : Union[bool, Tuple[bool]] = False
    lowerCAmelCase__ : Tuple[int] = (320, 640, 1280, 1280)
    lowerCAmelCase__ : int = 2
    lowerCAmelCase__ : Union[int, Tuple[int]] = 8
    lowerCAmelCase__ : Optional[Union[int, Tuple[int]]] = None
    lowerCAmelCase__ : int = 1280
    lowerCAmelCase__ : float = 0.0
    lowerCAmelCase__ : bool = False
    lowerCAmelCase__ : jnp.dtype = jnp.floataa
    lowerCAmelCase__ : bool = True
    lowerCAmelCase__ : int = 0
    lowerCAmelCase__ : bool = False

    def a__ ( self : int , _UpperCAmelCase : jax.random.KeyArray ) -> FrozenDict:
        """simple docstring"""
        __lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
        __lowercase = jnp.zeros(_UpperCAmelCase , dtype=jnp.floataa )
        __lowercase = jnp.ones((1,) , dtype=jnp.intaa )
        __lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )

        __lowercase , __lowercase = jax.random.split(_UpperCAmelCase )
        __lowercase = {'params': params_rng, 'dropout': dropout_rng}

        return self.init(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )["params"]

    def a__ ( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        __lowercase = self.block_out_channels
        __lowercase = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.'
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        __lowercase = self.num_attention_heads or self.attention_head_dim

        # input
        __lowercase = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        __lowercase = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        __lowercase = FlaxTimestepEmbedding(_UpperCAmelCase , dtype=self.dtype )

        __lowercase = self.only_cross_attention
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            __lowercase = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            __lowercase = (num_attention_heads,) * len(self.down_block_types )

        # down
        __lowercase = []
        __lowercase = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            __lowercase = output_channel
            __lowercase = block_out_channels[i]
            __lowercase = i == len(_UpperCAmelCase ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                __lowercase = FlaxCrossAttnDownBlockaD(
                    in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                __lowercase = FlaxDownBlockaD(
                    in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(_UpperCAmelCase )
        __lowercase = down_blocks

        # mid
        __lowercase = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )

        # up
        __lowercase = []
        __lowercase = list(reversed(_UpperCAmelCase ) )
        __lowercase = list(reversed(_UpperCAmelCase ) )
        __lowercase = list(reversed(_UpperCAmelCase ) )
        __lowercase = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            __lowercase = output_channel
            __lowercase = reversed_block_out_channels[i]
            __lowercase = reversed_block_out_channels[min(i + 1 , len(_UpperCAmelCase ) - 1 )]

            __lowercase = i == len(_UpperCAmelCase ) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                __lowercase = FlaxCrossAttnUpBlockaD(
                    in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , prev_output_channel=_UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                __lowercase = FlaxUpBlockaD(
                    in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , prev_output_channel=_UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )

            up_blocks.append(_UpperCAmelCase )
            __lowercase = output_channel
        __lowercase = up_blocks

        # out
        __lowercase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        __lowercase = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=None , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        """simple docstring"""
        if not isinstance(_UpperCAmelCase , jnp.ndarray ):
            __lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(_UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            __lowercase = timesteps.astype(dtype=jnp.floataa )
            __lowercase = jnp.expand_dims(_UpperCAmelCase , 0 )

        __lowercase = self.time_proj(_UpperCAmelCase )
        __lowercase = self.time_embedding(_UpperCAmelCase )

        # 2. pre-process
        __lowercase = jnp.transpose(_UpperCAmelCase , (0, 2, 3, 1) )
        __lowercase = self.conv_in(_UpperCAmelCase )

        # 3. down
        __lowercase = (sample,)
        for down_block in self.down_blocks:
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                __lowercase , __lowercase = down_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
            else:
                __lowercase , __lowercase = down_block(_UpperCAmelCase , _UpperCAmelCase , deterministic=not train )
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            __lowercase = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                _UpperCAmelCase , _UpperCAmelCase
            ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            __lowercase = new_down_block_res_samples

        # 4. mid
        __lowercase = self.mid_block(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , deterministic=not train )

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            __lowercase = down_block_res_samples[-(self.layers_per_block + 1) :]
            __lowercase = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                __lowercase = up_block(
                    _UpperCAmelCase , temb=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , deterministic=not train , )
            else:
                __lowercase = up_block(_UpperCAmelCase , temb=_UpperCAmelCase , res_hidden_states_tuple=_UpperCAmelCase , deterministic=not train )

        # 6. post-process
        __lowercase = self.conv_norm_out(_UpperCAmelCase )
        __lowercase = nn.silu(_UpperCAmelCase )
        __lowercase = self.conv_out(_UpperCAmelCase )
        __lowercase = jnp.transpose(_UpperCAmelCase , (0, 3, 1, 2) )

        if not return_dict:
            return (sample,)

        return FlaxUNetaDConditionOutput(sample=_UpperCAmelCase )
325
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


SCREAMING_SNAKE_CASE__ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
325
1
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : List[Any] = ["image_processor", "tokenizer"]
    lowerCAmelCase__ : int = "BlipImageProcessor"
    lowerCAmelCase__ : List[Any] = "AutoTokenizer"

    def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(_UpperCAmelCase , _UpperCAmelCase )

        # add QFormer tokenizer
        __lowercase = qformer_tokenizer

    def __call__( self : int , _UpperCAmelCase : ImageInput = None , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[str] , ) -> BatchFeature:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('You have to specify at least images or text.' )

        __lowercase = BatchFeature()

        if text is not None:
            __lowercase = self.tokenizer(
                text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
            encoding.update(_UpperCAmelCase )
            __lowercase = self.qformer_tokenizer(
                text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
            __lowercase = qformer_text_encoding.pop('input_ids' )
            __lowercase = qformer_text_encoding.pop('attention_mask' )

        if images is not None:
            __lowercase = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
            encoding.update(_UpperCAmelCase )

        return encoding

    def a__ ( self : Union[str, Any] , *_UpperCAmelCase : str , **_UpperCAmelCase : Union[str, Any] ) -> Any:
        """simple docstring"""
        return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )

    def a__ ( self : Optional[Any] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : List[Any] ) -> List[str]:
        """simple docstring"""
        return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def a__ ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        __lowercase = self.tokenizer.model_input_names
        __lowercase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def a__ ( self : List[str] , _UpperCAmelCase : Any , **_UpperCAmelCase : Optional[int] ) -> List[Any]:
        """simple docstring"""
        if os.path.isfile(_UpperCAmelCase ):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
        __lowercase = os.path.join(_UpperCAmelCase , 'qformer_tokenizer' )
        self.qformer_tokenizer.save_pretrained(_UpperCAmelCase )
        return super().save_pretrained(_UpperCAmelCase , **_UpperCAmelCase )

    @classmethod
    def a__ ( cls : str , _UpperCAmelCase : List[str] , **_UpperCAmelCase : int ) -> Tuple:
        """simple docstring"""
        __lowercase = AutoTokenizer.from_pretrained(_UpperCAmelCase , subfolder='qformer_tokenizer' )
        __lowercase = cls._get_arguments_from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
        args.append(_UpperCAmelCase )
        return cls(*_UpperCAmelCase )
325
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}


class A__ ( lowerCAmelCase__ ):
    lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
    lowerCAmelCase__ : int = ["mems"]
    lowerCAmelCase__ : Dict = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple:
        """simple docstring"""
        __lowercase = vocab_size
        __lowercase = []
        self.cutoffs.extend(_UpperCAmelCase )
        if proj_share_all_but_first:
            __lowercase = [False] + [True] * len(self.cutoffs )
        else:
            __lowercase = [False] + [False] * len(self.cutoffs )
        __lowercase = d_model
        __lowercase = d_embed
        __lowercase = d_head
        __lowercase = d_inner
        __lowercase = div_val
        __lowercase = pre_lnorm
        __lowercase = n_layer
        __lowercase = n_head
        __lowercase = mem_len
        __lowercase = same_length
        __lowercase = attn_type
        __lowercase = clamp_len
        __lowercase = sample_softmax
        __lowercase = adaptive
        __lowercase = dropout
        __lowercase = dropatt
        __lowercase = untie_r
        __lowercase = init
        __lowercase = init_range
        __lowercase = proj_init_std
        __lowercase = init_std
        __lowercase = layer_norm_epsilon
        super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )

    @property
    def a__ ( self : Tuple ) -> Any:
        """simple docstring"""
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1

    @max_position_embeddings.setter
    def a__ ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
        """simple docstring"""
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
325
1
from math import isqrt, loga


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]:
    __lowercase = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
                __lowercase = False
    return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800800 , SCREAMING_SNAKE_CASE : int = 800800 ) -> int:
    __lowercase = degree * loga(SCREAMING_SNAKE_CASE )
    __lowercase = int(SCREAMING_SNAKE_CASE )
    __lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE )

    __lowercase = 0
    __lowercase = 0
    __lowercase = len(SCREAMING_SNAKE_CASE ) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left] )
            + prime_numbers[left] * loga(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
325
import argparse
import json
import os

import fairseq
import torch
from torch import nn

from transformers import (
    SpeechaTextaConfig,
    SpeechaTextaForCausalLM,
    SpeechaTextaTokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaModel,
    logging,
)


logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
    for attribute in key.split('.' ):
        __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    if weight_type is not None:
        __lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
    else:
        __lowercase = hf_pointer.shape

    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        __lowercase = value
    elif weight_type == "weight_g":
        __lowercase = value
    elif weight_type == "weight_v":
        __lowercase = value
    elif weight_type == "bias":
        __lowercase = value
    else:
        __lowercase = value

    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
    __lowercase = []
    __lowercase = fairseq_model.state_dict()

    __lowercase = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    __lowercase = None

    for name, value in fairseq_dict.items():
        __lowercase = False
        if "conv_layers" in name:
            load_conv_layer(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
            __lowercase = True
        elif name.split('.' )[0] == "proj":
            __lowercase = fairseq_model.proj
            __lowercase = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    __lowercase = True
                    if "*" in mapped_key:
                        __lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
                        __lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
                    if "weight_g" in name:
                        __lowercase = 'weight_g'
                    elif "weight_v" in name:
                        __lowercase = 'weight_v'
                    elif "bias" in name:
                        __lowercase = 'bias'
                    elif "weight" in name:
                        __lowercase = 'weight'
                    else:
                        __lowercase = None
                    set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                continue
        if not is_used:
            unused_weights.append(SCREAMING_SNAKE_CASE )

    logger.warning(F"""Unused weights: {unused_weights}""" )

    return proj_weight


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
    __lowercase = full_name.split('conv_layers.' )[-1]
    __lowercase = name.split('.' )
    __lowercase = int(items[0] )
    __lowercase = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            __lowercase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            __lowercase = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            __lowercase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            __lowercase = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(SCREAMING_SNAKE_CASE )


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
    __lowercase , __lowercase = emb.weight.shape
    __lowercase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
    __lowercase = emb.weight.data
    return lin_layer


def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
    with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
        __lowercase = f.readlines()
        __lowercase = [line.split(' ' )[0] for line in lines]

    __lowercase = len(SCREAMING_SNAKE_CASE )

    __lowercase = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
    return vocab_dict


@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
    __lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
    __lowercase = SpeechaTextaConfig.from_pretrained(
        SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )

    __lowercase = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )

    __lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    __lowercase = model[0].eval()

    # set weights for wav2vec2 encoder
    __lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE )
    __lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )

    __lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
    __lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )

    # set output linear layer
    unexpected_keys.remove('embed_out' )
    __lowercase = nn.Parameter(model.decoder.embed_out.detach() )

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )

    __lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
    __lowercase = False

    # add projection layer
    __lowercase = nn.Parameter(projection_layer.weight )
    __lowercase = nn.Parameter(projection_layer.bias )

    __lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE )

    with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
        json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    __lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) )
    tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )

    __lowercase = hf_wavavec.config.to_dict()
    __lowercase = tokenizer.pad_token_id
    __lowercase = tokenizer.bos_token_id
    __lowercase = tokenizer.eos_token_id
    __lowercase = 'speech_to_text_2'
    __lowercase = 'wav2vec2'

    __lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )

    hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
    feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument(
        """--encoder_config_path""",
        default="""facebook/wav2vec2-large-lv60""",
        type=str,
        help="""Path to hf encoder wav2vec2 checkpoint config""",
    )
    parser.add_argument(
        """--decoder_config_path""",
        default="""facebook/s2t-small-mustc-en-fr-st""",
        type=str,
        help="""Path to hf decoder s2t checkpoint config""",
    )
    parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
    parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    SCREAMING_SNAKE_CASE__ = parser.parse_args()

    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
325
1
import os


def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
    with open(os.path.dirname(SCREAMING_SNAKE_CASE ) + '/p022_names.txt' ) as file:
        __lowercase = str(file.readlines()[0] )
        __lowercase = names.replace('"' , '' ).split(',' )

    names.sort()

    __lowercase = 0
    __lowercase = 0

    for i, name in enumerate(SCREAMING_SNAKE_CASE ):
        for letter in name:
            name_score += ord(SCREAMING_SNAKE_CASE ) - 64

        total_score += (i + 1) * name_score
        __lowercase = 0
    return total_score


if __name__ == "__main__":
    print(solution())
325
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
    __lowercase = [0 for i in range(r + 1 )]
    # nc0 = 1
    __lowercase = 1

    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        __lowercase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
325
1