Dataset schema:

    field                     type      range
    code                      string    length 87 – 55.2k
    code_codestyle            int64     0 – 349
    style_context             string    length 135 – 49.1k
    style_context_codestyle   int64     0 – 349
    label                     int64     0 – 1
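As a minimal sketch of consuming a row under this schema (assuming a Hugging Face `datasets`-style source; the dataset path below is a placeholder, not the real name):

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path
row = ds[0]
print(len(row["code"]))                # string, 87 to 55.2k characters
print(row["code_codestyle"])           # int64 style id, 0 - 349
print(len(row["style_context"]))       # string, 135 to 49.1k characters
print(row["style_context_codestyle"])  # int64 style id, 0 - 349
print(row["label"])                    # int64, 0 or 1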
Row 1

code:

import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class _lowerCamelCase:
    lowercase_: Optional[Union[str, Path]] = None
    lowercase_: bool = False
    lowercase_: bool = False
    lowercase_: bool = False
    lowercase_: Optional[Dict] = None
    lowercase_: Optional[str] = None
    lowercase_: bool = False
    lowercase_: bool = False
    lowercase_: bool = False
    lowercase_: bool = True
    lowercase_: Optional[int] = None
    lowercase_: int = 1
    lowercase_: Optional[Union[str, bool]] = None
    lowercase_: bool = False
    lowercase_: Optional[Dict] = None
    lowercase_: Optional[str] = None

    def UpperCamelCase(self) -> "DownloadConfig":
        """simple docstring"""
        # Deep-copy every attribute into a fresh instance (the original
        # comprehension deep-copied an undefined name instead of `v`).
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

code_codestyle: 21
style_context:

'''simple docstring'''
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin

lowercase__: Dict = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

lowercase__: List[Any] = 25_00_04
lowercase__: str = 25_00_20


@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase(__magic_name__, unittest.TestCase):
    """simple docstring"""

    _snake_case: Optional[Any] = MBartTokenizer
    _snake_case: Tuple = MBartTokenizerFast
    _snake_case: List[str] = True
    _snake_case: Optional[Any] = True

    def snake_case__(self: Any) -> Optional[int]:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        _UpperCamelCase = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
        tokenizer.save_pretrained(self.tmpdirname)

    def snake_case__(self: str) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__)
        _UpperCamelCase = tokenizer.tokenize('This is a test')
        self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowerCAmelCase__),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        _UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            lowerCAmelCase__,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ],
        )
        _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
        self.assertListEqual(
            lowerCAmelCase__,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                              unk: 2 + 1 = 3 ^
            ],
        )
        _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
        self.assertListEqual(
            lowerCAmelCase__,
            [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ],
        )

    def snake_case__(self: Any) -> Dict:
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        _UpperCamelCase = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
                _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__)
                _UpperCamelCase = tempfile.mkdtemp()
                _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                _UpperCamelCase = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
                # Checks everything loads correctly in the same way
                _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(lowerCAmelCase__)
                # Save tokenizer rust, legacy_format=True
                _UpperCamelCase = tempfile.mkdtemp()
                _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__)
                # Checks it save with the same files
                self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__)
                # Checks everything loads correctly in the same way
                _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
                shutil.rmtree(lowerCAmelCase__)
                # Save tokenizer rust, legacy_format=False
                _UpperCamelCase = tempfile.mkdtemp()
                _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__)
                _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__))
                shutil.rmtree(lowerCAmelCase__)


@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    _snake_case: Dict = 'facebook/mbart-large-en-ro'
    _snake_case: Dict = [
        ' UN Chief Says There Is No Military Solution in Syria',
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    _snake_case: List[Any] = [
        'Şeful ONU declară că nu există o soluţie militară în Siria',
        'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
    ]
    _snake_case: Union[str, Any] = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def snake_case__(cls: List[str]) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = MBartTokenizer.from_pretrained(cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO')
        _UpperCamelCase = 1
        return cls

    def snake_case__(self: Dict) -> Union[str, Any]:
        '''simple docstring'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250020)

    def snake_case__(self: Optional[Any]) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)

    def snake_case__(self: str) -> List[Any]:
        '''simple docstring'''
        self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids)
        _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__)
        _UpperCamelCase = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__)
        self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
        self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__)

    def snake_case__(self: Any) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], lowerCAmelCase__)
        _UpperCamelCase = 10
        _UpperCamelCase = self.tokenizer(lowerCAmelCase__, max_length=lowerCAmelCase__, truncation=lowerCAmelCase__).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], lowerCAmelCase__)
        self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__)

    def snake_case__(self: List[Any]) -> int:
        '''simple docstring'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [250026, 250001])

    def snake_case__(self: int) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = tempfile.mkdtemp()
        _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(lowerCAmelCase__)
        _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCAmelCase__)

    @require_torch
    def snake_case__(self: Any) -> List[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, return_tensors='pt')
        _UpperCamelCase = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def snake_case__(self: Optional[Any]) -> int:
        '''simple docstring'''
        _UpperCamelCase = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=lowerCAmelCase__,
            truncation=lowerCAmelCase__,
            max_length=len(self.expected_src_tokens),
            return_tensors='pt',
        )
        _UpperCamelCase = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id)
        self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        _UpperCamelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def snake_case__(self: Optional[Any]) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = self.tokenizer(self.src_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=3, return_tensors='pt')
        _UpperCamelCase = self.tokenizer(
            text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=10, return_tensors='pt'
        )
        _UpperCamelCase = targets['input_ids']
        _UpperCamelCase = shift_tokens_right(lowerCAmelCase__, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def snake_case__(self: Tuple) -> Tuple:
        '''simple docstring'''
        _UpperCamelCase = self.tokenizer._build_translation_inputs(
            'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR'
        )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__),
            {
                # A, test, EOS, en_XX
                'input_ids': [[62, 3034, 2, 250004]],
                'attention_mask': [[1, 1, 1, 1]],
                # ar_AR
                'forced_bos_token_id': 250001,
            },
        )

style_context_codestyle: 324

label: 0
Row 2

code:

'''simple docstring'''
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class A_:
    @property
    def lowercase(self: Optional[int]):
        return self.get_dummy_input()

    @property
    def lowercase(self: List[str]):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def lowercase(self: str, snake_case_: Dict = True, snake_case_: int = False, snake_case_: Any = False, snake_case_: Union[str, Any] = False):
        _UpperCAmelCase = 4
        _UpperCAmelCase = 32
        _UpperCAmelCase = (32, 32)
        _UpperCAmelCase = torch.manual_seed(0)
        _UpperCAmelCase = torch.device(snake_case_)
        _UpperCAmelCase = (batch_size, num_channels) + sizes
        _UpperCAmelCase = randn_tensor(snake_case_, generator=snake_case_, device=snake_case_)
        _UpperCAmelCase = {"hidden_states": hidden_states}
        if include_temb:
            _UpperCAmelCase = 128
            _UpperCAmelCase = randn_tensor((batch_size, temb_channels), generator=snake_case_, device=snake_case_)
        if include_res_hidden_states_tuple:
            _UpperCAmelCase = torch.manual_seed(1)
            _UpperCAmelCase = (randn_tensor(snake_case_, generator=snake_case_, device=snake_case_),)
        if include_encoder_hidden_states:
            _UpperCAmelCase = floats_tensor((batch_size, 32, 32)).to(snake_case_)
        if include_skip_sample:
            _UpperCAmelCase = randn_tensor(((batch_size, 3) + sizes), generator=snake_case_, device=snake_case_)
        return dummy_input

    def lowercase(self: List[str]):
        _UpperCAmelCase = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            _UpperCAmelCase = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        _UpperCAmelCase = self.dummy_input
        return init_dict, inputs_dict

    def lowercase(self: int, snake_case_: List[str]):
        _UpperCAmelCase, _UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
        _UpperCAmelCase = self.block_class(**snake_case_)
        unet_block.to(snake_case_)
        unet_block.eval()
        with torch.no_grad():
            _UpperCAmelCase = unet_block(**snake_case_)
        if isinstance(snake_case_, snake_case_):
            _UpperCAmelCase = output[0]
        self.assertEqual(output.shape, self.output_shape)
        _UpperCAmelCase = output[0, -1, -3:, -3:]
        _UpperCAmelCase = torch.tensor(snake_case_).to(snake_case_)
        assert torch_all_close(output_slice.flatten(), snake_case_, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def lowercase(self: str):
        _UpperCAmelCase, _UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
        _UpperCAmelCase = self.block_class(**snake_case_)
        model.to(snake_case_)
        model.train()
        _UpperCAmelCase = model(**snake_case_)
        if isinstance(snake_case_, snake_case_):
            _UpperCAmelCase = output[0]
        _UpperCAmelCase = torch.device(snake_case_)
        _UpperCAmelCase = randn_tensor(output.shape, device=snake_case_)
        _UpperCAmelCase = torch.nn.functional.mse_loss(snake_case_, snake_case_)
        loss.backward()

code_codestyle: 22
style_context:

'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging

if is_torch_available():
    import torch

lowercase__: str = logging.get_logger(__name__)


class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    _snake_case: Union[str, Any] = ['pixel_values']

    def __init__(
        self: Optional[Any],
        lowerCAmelCase__: bool = True,
        lowerCAmelCase__: Optional[Dict[str, int]] = None,
        lowerCAmelCase__: PILImageResampling = PILImageResampling.BILINEAR,
        lowerCAmelCase__: bool = True,
        lowerCAmelCase__: Dict[str, int] = None,
        lowerCAmelCase__: bool = True,
        lowerCAmelCase__: Union[int, float] = 1 / 255,
        lowerCAmelCase__: bool = True,
        lowerCAmelCase__: Optional[Union[float, List[float]]] = None,
        lowerCAmelCase__: Optional[Union[float, List[float]]] = None,
        **lowerCAmelCase__: Optional[Any],
    ) -> None:
        '''simple docstring'''
        super().__init__(**lowerCAmelCase__)
        _UpperCamelCase = size if size is not None else {'shortest_edge': 256}
        _UpperCamelCase = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
        _UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        _UpperCamelCase = get_size_dict(lowerCAmelCase__, param_name='crop_size')
        _UpperCamelCase = do_resize
        _UpperCamelCase = size
        _UpperCamelCase = resample
        _UpperCamelCase = do_center_crop
        _UpperCamelCase = crop_size
        _UpperCamelCase = do_rescale
        _UpperCamelCase = rescale_factor
        _UpperCamelCase = do_normalize
        _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def snake_case__(
        self: Tuple,
        lowerCAmelCase__: np.ndarray,
        lowerCAmelCase__: Dict[str, int],
        lowerCAmelCase__: PILImageResampling = PILImageResampling.BICUBIC,
        lowerCAmelCase__: Optional[Union[str, ChannelDimension]] = None,
        **lowerCAmelCase__: Optional[Any],
    ) -> np.ndarray:
        '''simple docstring'''
        _UpperCamelCase = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        _UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__, size=size['shortest_edge'], default_to_square=lowerCAmelCase__)
        return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)

    def snake_case__(
        self: List[Any],
        lowerCAmelCase__: np.ndarray,
        lowerCAmelCase__: Dict[str, int],
        lowerCAmelCase__: Optional[Union[str, ChannelDimension]] = None,
        **lowerCAmelCase__: Optional[Any],
    ) -> np.ndarray:
        '''simple docstring'''
        _UpperCamelCase = get_size_dict(lowerCAmelCase__)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__)

    def snake_case__(self: Dict, lowerCAmelCase__: np.ndarray, lowerCAmelCase__: float, lowerCAmelCase__: Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase__: Tuple) -> np.ndarray:
        '''simple docstring'''
        return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)

    def snake_case__(
        self: str,
        lowerCAmelCase__: np.ndarray,
        lowerCAmelCase__: Union[float, List[float]],
        lowerCAmelCase__: Union[float, List[float]],
        lowerCAmelCase__: Optional[Union[str, ChannelDimension]] = None,
        **lowerCAmelCase__: Any,
    ) -> np.ndarray:
        '''simple docstring'''
        return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__)

    def snake_case__(
        self: Optional[Any],
        lowerCAmelCase__: ImageInput,
        lowerCAmelCase__: Optional[bool] = None,
        lowerCAmelCase__: Dict[str, int] = None,
        lowerCAmelCase__: PILImageResampling = None,
        lowerCAmelCase__: bool = None,
        lowerCAmelCase__: Dict[str, int] = None,
        lowerCAmelCase__: Optional[bool] = None,
        lowerCAmelCase__: Optional[float] = None,
        lowerCAmelCase__: Optional[bool] = None,
        lowerCAmelCase__: Optional[Union[float, List[float]]] = None,
        lowerCAmelCase__: Optional[Union[float, List[float]]] = None,
        lowerCAmelCase__: Optional[Union[str, TensorType]] = None,
        lowerCAmelCase__: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **lowerCAmelCase__: Optional[Any],
    ) -> Any:
        '''simple docstring'''
        _UpperCamelCase = do_resize if do_resize is not None else self.do_resize
        _UpperCamelCase = size if size is not None else self.size
        _UpperCamelCase = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__)
        _UpperCamelCase = resample if resample is not None else self.resample
        _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
        _UpperCamelCase = crop_size if crop_size is not None else self.crop_size
        _UpperCamelCase = get_size_dict(lowerCAmelCase__, param_name='crop_size')
        _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
        _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
        _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
        _UpperCamelCase = image_mean if image_mean is not None else self.image_mean
        _UpperCamelCase = image_std if image_std is not None else self.image_std
        _UpperCamelCase = make_list_of_images(lowerCAmelCase__)
        if not valid_images(lowerCAmelCase__):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        _UpperCamelCase = [to_numpy_array(lowerCAmelCase__) for image in images]
        if do_resize:
            _UpperCamelCase = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images]
        if do_center_crop:
            _UpperCamelCase = [self.center_crop(image=lowerCAmelCase__, size=lowerCAmelCase__) for image in images]
        if do_rescale:
            _UpperCamelCase = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images]
        if do_normalize:
            _UpperCamelCase = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images]
        _UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images]
        _UpperCamelCase = {'pixel_values': images}
        return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)

    def snake_case__(self: List[str], lowerCAmelCase__: List[str], lowerCAmelCase__: List[Tuple] = None) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(lowerCAmelCase__) != len(lowerCAmelCase__):
                raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(lowerCAmelCase__):
                _UpperCamelCase = target_sizes.numpy()
            _UpperCamelCase = []
            for idx in range(len(lowerCAmelCase__)):
                _UpperCamelCase = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=lowerCAmelCase__
                )
                _UpperCamelCase = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(lowerCAmelCase__)
        else:
            _UpperCamelCase = logits.argmax(dim=1)
            _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation

style_context_codestyle: 324

label: 0
Row 3

code:

'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def snake_case_() -> Any:
    UpperCAmelCase: int = HfArgumentParser(_lowerCAmelCase)
    UpperCAmelCase: Optional[int] = parser.parse_args_into_dataclasses()[0]
    UpperCAmelCase: List[Any] = TensorFlowBenchmark(args=_lowerCAmelCase)
    try:
        UpperCAmelCase: List[Any] = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        UpperCAmelCase: Any = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        UpperCAmelCase: Union[str, Any] = ' '.join(str(_lowerCAmelCase).split(' ')[:-1])
        UpperCAmelCase: str = ''
        UpperCAmelCase: Optional[int] = eval(str(_lowerCAmelCase).split(' ')[-1])
        UpperCAmelCase: Optional[Any] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(_lowerCAmelCase)
        if len(_lowerCAmelCase) > 0:
            UpperCAmelCase: int = full_error_msg + begin_error_msg + str(_lowerCAmelCase)
            raise ValueError(_lowerCAmelCase)
    benchmark.run()


if __name__ == "__main__":
    main()

code_codestyle: 23
style_context:

'''simple docstring'''
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
    FlaxCrossAttnDownBlockaD,
    FlaxCrossAttnUpBlockaD,
    FlaxDownBlockaD,
    FlaxUNetMidBlockaDCrossAttn,
    FlaxUpBlockaD,
)


@flax.struct.dataclass
class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    _snake_case: jnp.ndarray


@flax_register_to_config
class __lowerCAmelCase(nn.Module, __magic_name__, __magic_name__):
    """simple docstring"""

    _snake_case: int = 32
    _snake_case: int = 4
    _snake_case: int = 4
    _snake_case: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _snake_case: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    _snake_case: Union[bool, Tuple[bool]] = False
    _snake_case: Tuple[int] = (320, 640, 1280, 1280)
    _snake_case: int = 2
    _snake_case: Union[int, Tuple[int]] = 8
    _snake_case: Optional[Union[int, Tuple[int]]] = None
    _snake_case: int = 1280
    _snake_case: float = 0.0
    _snake_case: bool = False
    _snake_case: jnp.dtype = jnp.floataa
    _snake_case: bool = True
    _snake_case: int = 0
    _snake_case: bool = False

    def snake_case__(self: List[Any], lowerCAmelCase__: jax.random.KeyArray) -> FrozenDict:
        '''simple docstring'''
        _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size)
        _UpperCamelCase = jnp.zeros(lowerCAmelCase__, dtype=jnp.floataa)
        _UpperCamelCase = jnp.ones((1,), dtype=jnp.intaa)
        _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
        _UpperCamelCase, _UpperCamelCase = jax.random.split(lowerCAmelCase__)
        _UpperCamelCase = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)["params"]

    def snake_case__(self: List[Any]) -> Any:
        '''simple docstring'''
        _UpperCamelCase = self.block_out_channels
        _UpperCamelCase = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.'
            )
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        _UpperCamelCase = self.num_attention_heads or self.attention_head_dim

        # input
        _UpperCamelCase = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        _UpperCamelCase = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__, dtype=self.dtype)

        _UpperCamelCase = self.only_cross_attention
        if isinstance(lowerCAmelCase__, lowerCAmelCase__):
            _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(lowerCAmelCase__, lowerCAmelCase__):
            _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types)

        # down
        _UpperCamelCase = []
        _UpperCamelCase = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            _UpperCamelCase = output_channel
            _UpperCamelCase = block_out_channels[i]
            _UpperCamelCase = i == len(lowerCAmelCase__) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                _UpperCamelCase = FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCAmelCase__,
                    out_channels=lowerCAmelCase__,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                _UpperCamelCase = FlaxDownBlockaD(
                    in_channels=lowerCAmelCase__,
                    out_channels=lowerCAmelCase__,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(lowerCAmelCase__)
        _UpperCamelCase = down_blocks

        # mid
        _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1],
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            use_memory_efficient_attention=self.use_memory_efficient_attention,
            dtype=self.dtype,
        )

        # up
        _UpperCamelCase = []
        _UpperCamelCase = list(reversed(lowerCAmelCase__))
        _UpperCamelCase = list(reversed(lowerCAmelCase__))
        _UpperCamelCase = list(reversed(lowerCAmelCase__))
        _UpperCamelCase = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            _UpperCamelCase = output_channel
            _UpperCamelCase = reversed_block_out_channels[i]
            _UpperCamelCase = reversed_block_out_channels[min(i + 1, len(lowerCAmelCase__) - 1)]
            _UpperCamelCase = i == len(lowerCAmelCase__) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                _UpperCamelCase = FlaxCrossAttnUpBlockaD(
                    in_channels=lowerCAmelCase__,
                    out_channels=lowerCAmelCase__,
                    prev_output_channel=lowerCAmelCase__,
                    num_layers=self.layers_per_block + 1,
                    num_attention_heads=reversed_num_attention_heads[i],
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    use_memory_efficient_attention=self.use_memory_efficient_attention,
                    dtype=self.dtype,
                )
            else:
                _UpperCamelCase = FlaxUpBlockaD(
                    in_channels=lowerCAmelCase__,
                    out_channels=lowerCAmelCase__,
                    prev_output_channel=lowerCAmelCase__,
                    num_layers=self.layers_per_block + 1,
                    add_upsample=not is_final_block,
                    dropout=self.dropout,
                    dtype=self.dtype,
                )
            up_blocks.append(lowerCAmelCase__)
            _UpperCamelCase = output_channel
        _UpperCamelCase = up_blocks

        # out
        _UpperCamelCase = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        _UpperCamelCase = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(
        self: List[str],
        lowerCAmelCase__: Optional[Any],
        lowerCAmelCase__: List[str],
        lowerCAmelCase__: Tuple,
        lowerCAmelCase__: int = None,
        lowerCAmelCase__: Any = None,
        lowerCAmelCase__: bool = True,
        lowerCAmelCase__: bool = False,
    ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
        '''simple docstring'''
        if not isinstance(lowerCAmelCase__, jnp.ndarray):
            _UpperCamelCase = jnp.array([timesteps], dtype=jnp.intaa)
        elif isinstance(lowerCAmelCase__, jnp.ndarray) and len(timesteps.shape) == 0:
            _UpperCamelCase = timesteps.astype(dtype=jnp.floataa)
            _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__, 0)
        _UpperCamelCase = self.time_proj(lowerCAmelCase__)
        _UpperCamelCase = self.time_embedding(lowerCAmelCase__)

        # 2. pre-process
        _UpperCamelCase = jnp.transpose(lowerCAmelCase__, (0, 2, 3, 1))
        _UpperCamelCase = self.conv_in(lowerCAmelCase__)

        # 3. down
        _UpperCamelCase = (sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCAmelCase__, lowerCAmelCase__):
                _UpperCamelCase, _UpperCamelCase = down_block(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, deterministic=not train)
            else:
                _UpperCamelCase, _UpperCamelCase = down_block(lowerCAmelCase__, lowerCAmelCase__, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            _UpperCamelCase = ()
            for down_block_res_sample, down_block_additional_residual in zip(lowerCAmelCase__, lowerCAmelCase__):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            _UpperCamelCase = new_down_block_res_samples

        # 4. mid
        _UpperCamelCase = self.mid_block(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1):]
            _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(lowerCAmelCase__, lowerCAmelCase__):
                _UpperCamelCase = up_block(
                    lowerCAmelCase__,
                    temb=lowerCAmelCase__,
                    encoder_hidden_states=lowerCAmelCase__,
                    res_hidden_states_tuple=lowerCAmelCase__,
                    deterministic=not train,
                )
            else:
                _UpperCamelCase = up_block(lowerCAmelCase__, temb=lowerCAmelCase__, res_hidden_states_tuple=lowerCAmelCase__, deterministic=not train)

        # 6. post-process
        _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__)
        _UpperCamelCase = nn.silu(lowerCAmelCase__)
        _UpperCamelCase = self.conv_out(lowerCAmelCase__)
        _UpperCamelCase = jnp.transpose(lowerCAmelCase__, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__)

style_context_codestyle: 324

label: 0
Row 4

code:

def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle.

    The original sample bound every assignment to a single placeholder name and
    called `topological_sort(graph)`; the consistent names below are restored
    from that call and from the references inside the function body.
    """
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # Start from all vertices with no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)

code_codestyle: 24
style_context:

'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow

lowercase__: List[str] = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)
lowercase__: Dict = logging.getLogger()


def a__() -> Optional[int]:
    """simple docstring"""
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('-f')
    _UpperCamelCase = parser.parse_args()
    return args.f


def a__(lowercase: Tuple, lowercase: Dict = "eval") -> int:
    """simple docstring"""
    _UpperCamelCase = os.path.join(lowercase, f"{split}_results.json")
    if os.path.exists(lowercase):
        with open(lowercase, 'r') as f:
            return json.load(lowercase)
    raise ValueError(f"can't find {path}")


lowercase__: int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    def snake_case__(self: Any) -> str:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_flax_glue.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)

    @slow
    def snake_case__(self: Tuple) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_clm_flax.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertLess(result['eval_perplexity'], 100)

    @slow
    def snake_case__(self: Tuple) -> str:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_summarization_flax.main()
            _UpperCamelCase = get_results(lowerCAmelCase__, split='test')
            self.assertGreaterEqual(result['test_rouge1'], 10)
            self.assertGreaterEqual(result['test_rouge2'], 2)
            self.assertGreaterEqual(result['test_rougeL'], 7)
            self.assertGreaterEqual(result['test_rougeLsum'], 7)

    @slow
    def snake_case__(self: Tuple) -> Any:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_mlm_flax.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertLess(result['eval_perplexity'], 42)

    @slow
    def snake_case__(self: str) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_ta_mlm_flax.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertGreaterEqual(result['eval_accuracy'], 0.42)

    @slow
    def snake_case__(self: List[Any]) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = 7 if get_gpu_count() > 1 else 2
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_flax_ner.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertGreaterEqual(result['eval_accuracy'], 0.75)
            self.assertGreaterEqual(result['eval_f1'], 0.3)

    @slow
    def snake_case__(self: str) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
        """.split()
        with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__):
            run_qa.main()
            _UpperCamelCase = get_results(lowerCAmelCase__)
            self.assertGreaterEqual(result['eval_f1'], 30)
            self.assertGreaterEqual(result['eval_exact'], 30)

style_context_codestyle: 324

label: 0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase__ : int = 1_6 UpperCAmelCase__ : int = 3_2 def lowercase_ ( _snake_case ,_snake_case = 16 ): SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained("""bert-base-cased""" ) SCREAMING_SNAKE_CASE__ : Tuple = load_dataset("""glue""" ,"""mrpc""" ) def tokenize_function(_snake_case ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_snake_case ,max_length=_snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): SCREAMING_SNAKE_CASE__ : List[str] = datasets.map( _snake_case ,batched=_snake_case ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE__ : Any = tokenized_datasets.rename_column("""label""" ,"""labels""" ) def collate_fn(_snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. SCREAMING_SNAKE_CASE__ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": SCREAMING_SNAKE_CASE__ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": SCREAMING_SNAKE_CASE__ : Any = 8 else: SCREAMING_SNAKE_CASE__ : Optional[Any] = None return tokenizer.pad( _snake_case ,padding="""longest""" ,max_length=_snake_case ,pad_to_multiple_of=_snake_case ,return_tensors="""pt""" ,) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader( tokenized_datasets["""train"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case ) SCREAMING_SNAKE_CASE__ : int = DataLoader( tokenized_datasets["""validation"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase__ : Union[str, Any] = mocked_dataloaders # noqa: F811 def lowercase_ ( _snake_case ,_snake_case ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_snake_case ) == "1": SCREAMING_SNAKE_CASE__ : Optional[int] = 2 # Initialize accelerator SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE__ : List[str] = config["""lr"""] SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""num_epochs"""] ) SCREAMING_SNAKE_CASE__ : List[str] = int(config["""seed"""] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config["""batch_size"""] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load("""glue""" ,"""mrpc""" ) # If the batch size is too big we use gradient accumulation SCREAMING_SNAKE_CASE__ : Dict = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: SCREAMING_SNAKE_CASE__ : Tuple = batch_size // MAX_GPU_BATCH_SIZE SCREAMING_SNAKE_CASE__ : Any = MAX_GPU_BATCH_SIZE set_seed(_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dataloaders(_snake_case ,_snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=_snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). SCREAMING_SNAKE_CASE__ : Dict = model.to(accelerator.device ) # Instantiate optimizer SCREAMING_SNAKE_CASE__ : Dict = AdamW(params=model.parameters() ,lr=_snake_case ) # Instantiate scheduler SCREAMING_SNAKE_CASE__ : Dict = get_linear_schedule_with_warmup( optimizer=_snake_case ,num_warmup_steps=100 ,num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps ,) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.prepare( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) # Now we train the model for epoch in range(_snake_case ): model.train() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.loss SCREAMING_SNAKE_CASE__ : List[Any] = loss / gradient_accumulation_steps accelerator.backward(_snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] = 0 for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.argmax(dim=-1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = accelerator.gather((predictions, batch["""labels"""]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(_snake_case ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples SCREAMING_SNAKE_CASE__ : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen] SCREAMING_SNAKE_CASE__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_snake_case ,references=_snake_case ,) SCREAMING_SNAKE_CASE__ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' ,_snake_case ) def lowercase_ ( ): SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" ,type=_snake_case ,default=_snake_case ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" ,) parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" ) SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args() SCREAMING_SNAKE_CASE__ : List[str] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(_snake_case ,_snake_case ) if __name__ == "__main__": main()
25
style_context:

'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available

logging.basicConfig(level=logging.DEBUG)
lowercase__: Optional[Any] = logging.getLogger()


def a__() -> Union[str, Any]:
    """simple docstring"""
    _UpperCamelCase = argparse.ArgumentParser()
    parser.add_argument('-f')
    _UpperCamelCase = parser.parse_args()
    return args.f


def a__(lowercase: Dict) -> int:
    """simple docstring"""
    _UpperCamelCase = {}
    _UpperCamelCase = os.path.join(lowercase, 'all_results.json')
    if os.path.exists(lowercase):
        with open(lowercase, 'r') as f:
            _UpperCamelCase = json.load(lowercase)
    else:
        raise ValueError(f"can't find {path}")
    return results


def a__() -> Optional[Any]:
    """simple docstring"""
    _UpperCamelCase = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()


lowercase__: str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class __lowerCAmelCase(__magic_name__):
    """simple docstring"""

    @classmethod
    def snake_case__(cls: Optional[int]) -> List[Any]:
        '''simple docstring'''
        _UpperCamelCase = tempfile.mkdtemp()
        _UpperCamelCase = os.path.join(cls.tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        _UpperCamelCase = ['accelerate', 'launch', '--config_file', cls.configPath]

    @classmethod
    def snake_case__(cls: Tuple) -> int:
        '''simple docstring'''
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: Any) -> Dict:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        if is_cuda_and_apex_available():
            testargs.append('--fp16')
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'glue_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: Union[str, Any]) -> int:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertLess(result['perplexity'], 100)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'clm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: Optional[int]) -> Tuple:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertLess(result['perplexity'], 42)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'mlm_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: Union[str, Any]) -> Any:
        '''simple docstring'''
        _UpperCamelCase = 7 if get_gpu_count() > 1 else 2
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertGreaterEqual(result['eval_accuracy'], 0.75)
        self.assertLess(result['train_loss'], 0.5)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'ner_no_trainer')))

    @unittest.skip(reason='Fix me @muellerzr')
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: int) -> int:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['eval_f1'], 28)
        self.assertGreaterEqual(result['eval_exact'], 28)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'qa_no_trainer')))

    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: Union[str, Any]) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertGreaterEqual(result['eval_accuracy'], 0.8)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'swag_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: List[str]) -> int:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertGreaterEqual(result['eval_rouge1'], 10)
        self.assertGreaterEqual(result['eval_rouge2'], 2)
        self.assertGreaterEqual(result['eval_rougeL'], 7)
        self.assertGreaterEqual(result['eval_rougeLsum'], 7)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'summarization_no_trainer')))

    @slow
    @mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'})
    def snake_case__(self: str) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.get_auto_remove_tmp_dir()
        _UpperCamelCase = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()
        run_command(self._launch_args + testargs)
        _UpperCamelCase = get_results(lowerCAmelCase__)
        self.assertGreaterEqual(result['eval_bleu'], 30)
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'epoch_0')))
        self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__, 'translation_no_trainer')))

    @slow
    def snake_case__(self: Any) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = logging.StreamHandler(sys.stdout)
        logger.addHandler(lowerCAmelCase__)
) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
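# A minimal standalone sketch of what these tests automate: compose an
# `accelerate launch` command for one of the example scripts and run it.
# The output directory is an illustrative placeholder; an accelerate default
# config is assumed to exist (see write_basic_config above).
import subprocess

launch_args = ["accelerate", "launch"]
script_args = [
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--model_name_or_path", "distilbert-base-uncased",
    "--train_file", "./tests/fixtures/tests_samples/MRPC/train.csv",
    "--validation_file", "./tests/fixtures/tests_samples/MRPC/dev.csv",
    "--output_dir", "/tmp/glue_no_trainer",
    "--per_device_train_batch_size", "2",
    "--learning_rate", "1e-4",
    "--seed", "42",
]
subprocess.run(launch_args + script_args, check=True)  # raises on a non-zero exit code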
import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin _snake_case = 1e-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class lowercase : def __init__( self , _a , _a=16 , _a=13 , _a=7 , _a=14 , _a=10 , _a=19 , _a=5 , _a=4 , _a=True , _a=16 , _a=2 , _a=4 , _a=4 , _a="gelu" , _a=0.1 , _a=0.1 , _a=[1, 2, 3, 4, 5] , _a=25 , _a=5 , ) -> Optional[Any]: _A : str = d_model _A : Any = parent _A : List[str] = batch_size _A : Any = prediction_length _A : str = context_length _A : Any = cardinality _A : str = num_time_features _A : str = lags_sequence _A : List[Any] = embedding_dimension _A : int = is_training _A : Tuple = hidden_size _A : Any = num_hidden_layers _A : Optional[Any] = num_attention_heads _A : Tuple = intermediate_size _A : List[Any] = hidden_act _A : Tuple = hidden_dropout_prob _A : Optional[Any] = attention_probs_dropout_prob _A : Any = context_length _A : str = prediction_length + label_length _A : int = label_length _A : List[str] = moving_average _A : Dict = autocorrelation_factor def a__ ( self ) -> List[str]: return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def a__ ( self , _a ) -> Optional[int]: _A : int = config.context_length + max(config.lags_sequence ) _A : Optional[int] = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) _A : List[str] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) _A : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) _A : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs _A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) _A : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length] ) _A : str = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def a__ ( self ) -> Tuple: _A : List[Any] = self.get_config() _A : int = self.prepare_autoformer_inputs_dict(_a ) return config, inputs_dict def a__ ( self ) -> Optional[int]: _A , _A : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def a__ ( self , _a , _a ) -> 
Optional[Any]: _A : Dict = AutoformerModel(config=_a ).to(_a ).eval() _A : int = model(**_a ) _A : str = outputs.encoder_last_hidden_state _A : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _A : str = model.get_encoder() encoder.save_pretrained(_a ) _A : Optional[Any] = AutoformerEncoder.from_pretrained(_a ).to(_a ) _A , _A , _A , _A , _A : Optional[int] = model.create_network_inputs(**_a ) _A , _A : str = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) _A : Union[str, Any] = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) _A : str = encoder(inputs_embeds=_a )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) _A : str = ( torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) _A : Optional[int] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) _A : Tuple = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) _A : int = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: _A : Tuple = model.get_decoder() decoder.save_pretrained(_a ) _A : Tuple = AutoformerDecoder.from_pretrained(_a ).to(_a ) _A : List[Any] = decoder( trend=_a , inputs_embeds=_a , encoder_hidden_states=_a , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ): _a = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () _a = (AutoformerForPrediction,) if is_torch_available() else () _a = {"feature-extraction": AutoformerModel} if is_torch_available() else {} _a = False _a = False _a = False _a = False _a = False _a = False def a__ ( self ) -> Dict: _A : Optional[int] = AutoformerModelTester(self ) _A : Union[str, Any] = ConfigTester(self , config_class=_a , has_text_modality=_a ) def a__ ( self ) -> int: self.config_tester.run_common_tests() def a__ ( self ) -> Optional[int]: _A , _A : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _A : Dict = model_class(_a ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) _A , _A : Any = model_class.from_pretrained(_a , output_loading_info=_a ) self.assertEqual(info["""missing_keys"""] , [] ) def a__ ( self ) -> str: _A : Dict = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_a ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def a__ ( self ) -> Optional[int]: pass def a__ ( self ) -> str: _A : Union[str, Any] = inspect.signature(getattr(_a , """forward""" ) ) # The main input is the name of the argument after `self` _A : Union[str, Any] = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _a ) def a__ ( self ) -> List[Any]: _A , _A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A : str = model_class(_a ) _A : List[Any] = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A : List[str] = [*signature.parameters.keys()] _A : Tuple = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(_a )] , _a ) def a__ ( self ) -> Optional[Any]: _A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _A : Any = True _A : str = getattr(self.model_tester , """seq_length""" , _a ) _A : Dict = getattr(self.model_tester , """decoder_seq_length""" , _a ) _A : str = getattr(self.model_tester , """encoder_seq_length""" , _a ) _A : List[Any] = getattr(self.model_tester , """d_model""" , _a ) _A : Optional[int] = getattr(self.model_tester , """num_attention_heads""" , _a ) _A : List[str] = d_model // num_attention_heads for model_class in self.all_model_classes: _A : Optional[Any] = True _A : List[str] = False _A : Optional[int] = True _A : Union[str, Any] = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : List[Any] = model(**self._prepare_for_class(_a , _a ) ) _A : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _A : Dict = True _A : List[Any] = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : int = model(**self._prepare_for_class(_a , _a ) ) _A : Tuple = outputs.encoder_attentions self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) _A : List[str] = len(_a ) _A : int = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_a , _a ) # decoder attentions _A : Dict = outputs.decoder_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions _A : Optional[Any] = outputs.cross_attentions self.assertIsInstance(_a , (list, tuple) ) self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine _A : Dict = True _A : Any = True _A : str = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _A : List[str] = model(**self._prepare_for_class(_a , _a ) ) self.assertEqual(out_len + 2 , len(_a ) ) _A : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions 
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def a__ ( self ) -> int: super().test_retain_grad_hidden_states_attentions() def lowerCAmelCase_ ( snake_case_="train-batch.pt" ): _A : Optional[int] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""",filename=snake_case_,repo_type="""dataset""" ) _A : List[str] = torch.load(snake_case_,map_location=snake_case_ ) return batch @require_torch @slow class lowercase ( unittest.TestCase ): def a__ ( self ) -> Any: _A : Optional[int] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Any = prepare_batch() with torch.no_grad(): _A : Union[str, Any] = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] _A : List[Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _a ) _A : str = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def a__ ( self ) -> Optional[Any]: _A : int = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Optional[Any] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _A : List[str] = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state _A : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _a ) _A : Tuple = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_a ) self.assertTrue(torch.allclose(output[0, :3, :3] , _a , atol=_a ) ) def a__ ( self ) -> List[str]: _A : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_a ) _A : Optional[int] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): _A : str = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) _A : str = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _a ) _A : int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_a ) _A : Dict = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _a , rtol=1e-1 ) )
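# An end-to-end sketch mirroring the integration tests above: download the
# hosted validation batch, load the pretrained checkpoint, and sample
# prediction trajectories. Requires network access to the Hugging Face Hub.
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoformerForPrediction

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
batch_file = hf_hub_download(
    repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset"
)
batch = torch.load(batch_file, map_location="cpu")
with torch.no_grad():
    outputs = model.generate(
        past_values=batch["past_values"],
        past_time_features=batch["past_time_features"],
        past_observed_mask=batch["past_observed_mask"],
        static_categorical_features=batch["static_categorical_features"],
        future_time_features=batch["future_time_features"],
    )
mean_prediction = outputs.sequences.mean(dim=1)  # average over num_parallel_samples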
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, drop non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably so the table fits in 5x5 (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
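# A short usage sketch for the Playfair helpers above; the key and message
# are arbitrary examples, not values from the original module.
if __name__ == "__main__":
    key = "MONARCHY"
    message = "Hide the gold"
    encrypted = encode(message, key)
    decrypted = decode(encrypted, key)
    # decode() recovers the *prepared* text (upper-cased, padded with X's),
    # not the original message verbatim.
    print(f"ciphertext: {encrypted}")
    print(f"recovered:  {decrypted}")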
'''simple docstring''' import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = AudioLDMPipeline A_ = TEXT_TO_AUDIO_PARAMS A_ = TEXT_TO_AUDIO_BATCH_PARAMS A_ = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , ) __a : Dict = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) __a : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __a : Any = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , ) __a : List[str] = ClapTextModelWithProjection(__a ) __a : Any = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77 ) __a : str = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , ) __a : Optional[int] = SpeechTaHifiGan(__a ) __a : Dict = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' if str(__a ).startswith('mps' ): __a : List[str] = torch.manual_seed(__a ) else: __a : Union[str, Any] = torch.Generator(device=__a ).manual_seed(__a ) __a : int = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : Tuple = self.get_dummy_components() __a : List[str] = AudioLDMPipeline(**__a ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[Any] = self.get_dummy_inputs(__a ) __a : Any = audioldm_pipe(**__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert 
len(__a ) == 256 __a : Dict = audio[:10] __a : Union[str, Any] = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : str = self.get_dummy_components() __a : Optional[Any] = AudioLDMPipeline(**__a ) __a : Any = audioldm_pipe.to(__a ) __a : List[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Any = self.get_dummy_inputs(__a ) __a : Optional[int] = 3 * [inputs['prompt']] # forward __a : List[Any] = audioldm_pipe(**__a ) __a : str = output.audios[0] __a : Any = self.get_dummy_inputs(__a ) __a : Optional[Any] = 3 * [inputs.pop('prompt' )] __a : int = audioldm_pipe.tokenizer( __a , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='pt' , ) __a : str = text_inputs['input_ids'].to(__a ) __a : Optional[int] = audioldm_pipe.text_encoder( __a , ) __a : int = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __a : Union[str, Any] = F.normalize(__a , dim=-1 ) __a : Optional[int] = prompt_embeds # forward __a : Union[str, Any] = audioldm_pipe(**__a ) __a : Optional[int] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = self.get_dummy_components() __a : Optional[int] = AudioLDMPipeline(**__a ) __a : Union[str, Any] = audioldm_pipe.to(__a ) __a : Union[str, Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : str = self.get_dummy_inputs(__a ) __a : List[str] = 3 * ['this is a negative prompt'] __a : int = negative_prompt __a : List[Any] = 3 * [inputs['prompt']] # forward __a : Dict = audioldm_pipe(**__a ) __a : str = output.audios[0] __a : List[Any] = self.get_dummy_inputs(__a ) __a : List[str] = 3 * [inputs.pop('prompt' )] __a : int = [] for p in [prompt, negative_prompt]: __a : List[str] = audioldm_pipe.tokenizer( __a , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='pt' , ) __a : Dict = text_inputs['input_ids'].to(__a ) __a : List[Any] = audioldm_pipe.text_encoder( __a , ) __a : Tuple = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __a : Optional[int] = F.normalize(__a , dim=-1 ) embeds.append(__a ) __a , __a : Union[str, Any] = embeds # forward __a : List[str] = audioldm_pipe(**__a ) __a : Optional[Any] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : Tuple = self.get_dummy_components() __a : Optional[int] = PNDMScheduler(skip_prk_steps=__a ) __a : List[Any] = AudioLDMPipeline(**__a ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[str] = self.get_dummy_inputs(__a ) __a : str = 'egg cracking' __a : Union[str, Any] = audioldm_pipe(**__a , negative_prompt=__a ) __a : List[str] = output.audios[0] assert audio.ndim == 1 assert len(__a ) == 256 __a : Tuple = audio[:10] __a : List[Any] = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : int = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : int = 
self.get_dummy_components() __a : Dict = PNDMScheduler(skip_prk_steps=__a ) __a : Any = AudioLDMPipeline(**__a ) __a : Dict = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : List[str] = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) __a : str = audioldm_pipe(__a , num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts __a : Tuple = 2 __a : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt __a : Tuple = 2 __a : Dict = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts __a : Optional[Any] = 2 __a : List[str] = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __a : int = self.get_dummy_components() __a : Optional[Any] = AudioLDMPipeline(**__a ) __a : Any = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Dict = audioldm_pipe.vocoder.config.sampling_rate __a : Union[str, Any] = self.get_dummy_inputs(__a ) __a : str = audioldm_pipe(audio_length_in_s=0.016 , **__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.016 __a : Tuple = audioldm_pipe(audio_length_in_s=0.032 , **__a ) __a : Optional[Any] = output.audios[0] assert audio.ndim == 1 assert len(__a ) / vocoder_sampling_rate == 0.032 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Optional[Any] = self.get_dummy_components() __a : int = AudioLDMPipeline(**__a ) __a : Optional[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = ['hey'] __a : Optional[Any] = audioldm_pipe(__a , num_inference_steps=1 ) __a : int = output.audios.shape assert audio_shape == (1, 256) __a : str = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __a : Any = SpeechTaHifiGan(__a ).to(__a ) __a : str = audioldm_pipe(__a , num_inference_steps=1 ) __a : int = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical(test_mean_pixel_difference=__a ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a ) @slow class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self , __a , __a="cpu" , __a=torch.floataa , __a=0 ): '''simple docstring''' __a : Tuple = torch.Generator(device=__a ).manual_seed(__a ) __a : List[str] = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) ) __a : Tuple = 
torch.from_numpy(__a ).to(device=__a , dtype=__a ) __a : Optional[Any] = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) __a : int = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Optional[Any] = self.get_inputs(__a ) __a : Optional[Any] = 25 __a : List[Any] = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __a : List[str] = audio[7_7230:7_7240] __a : str = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) __a : Dict = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def __UpperCAmelCase ( self ): '''simple docstring''' __a : Any = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) __a : Optional[int] = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) __a : Optional[Any] = audioldm_pipe.to(__a ) audioldm_pipe.set_progress_bar_config(disable=__a ) __a : Dict = self.get_inputs(__a ) __a : Tuple = audioldm_pipe(**__a ).audios[0] assert audio.ndim == 1 assert len(__a ) == 8_1920 __a : int = audio[2_7780:2_7790] __a : str = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] ) __a : Optional[Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
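# An illustrative text-to-audio sketch based on the slow tests above; the
# checkpoint name comes from those tests, while the prompt and step count
# are arbitrary examples.
import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
generator = torch.Generator(device="cpu").manual_seed(0)
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    guidance_scale=2.5,
    generator=generator,
).audios[0]
print(audio.shape)  # a 1-D waveform at the vocoder's sampling rate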
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Any = {'vocab_file': 'spiece.model'} lowercase__ : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } lowercase__ : Optional[Any] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['input_ids', 'attention_mask'] _snake_case : List[int] = [] def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> Tuple: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ ) return token def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__ ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase__ ) _UpperCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str: '''simple docstring''' _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ ) _UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase = [] _UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) _UpperCamelCase = [] sub_texts.append(lowerCAmelCase__ ) else: current_sub_text.append(lowerCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) ) else: _UpperCamelCase = ''''''.join(lowerCAmelCase__ ) _UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ ) return clean_text else: return text def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase__ , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (out_vocab_file,) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] _UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
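# A usage sketch, assuming the class above is transformers' BigBirdTokenizer
# (the pretrained map at the top of the module points at BigBird checkpoints).
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Paris is the capital of France.")
print(encoded["input_ids"])                    # ids with [CLS]/[SEP] added
print(tokenizer.decode(encoded["input_ids"]))  # round-trip back to text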
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
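# An illustrative invocation through the pipeline API, assuming `torch` and
# `decord` are installed; the checkpoint name and video path are example
# values, not ones referenced by the module above.
from transformers import pipeline

video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = video_classifier("path/to/clip.mp4", top_k=3, frame_sampling_rate=4)
print(predictions)  # [{"score": ..., "label": ...}, ...]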
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
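# A short usage sketch for the config above; the overridden values simply
# restate the defaults and are illustrative rather than recommendations.
config = ASTConfig(num_mel_bins=128, max_length=1024)
print(config.hidden_size)  # 768 (a ViT-Base-sized encoder by default)
print(config.model_type)   # "audio-spectrogram-transformer"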
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle, else False."""
    # Keep track of visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # A neighbour already on the recursion stack means a back edge
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
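# A minimal usage sketch for check_cycle(); the graphs are illustrative
# examples, not part of the original module.
if __name__ == "__main__":
    cyclic = {0: [1], 1: [2], 2: [0]}     # 0 -> 1 -> 2 -> 0 closes a cycle
    acyclic = {0: [1, 2], 1: [2], 2: []}  # a DAG has no back edge
    assert check_cycle(cyclic) is True
    assert check_cycle(acyclic) is False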
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase__ : Union[str, Any] = logging.get_logger(__name__) # General docstring lowercase__ : Dict = 'ResNetConfig' # Base docstring lowercase__ : str = 'microsoft/resnet-50' lowercase__ : Tuple = [1, 20_48, 7, 7] # Image classification docstring lowercase__ : Optional[Any] = 'microsoft/resnet-50' lowercase__ : List[str] = 'tiger cat' lowercase__ : List[Any] = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad( lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCamelCase = config.num_channels def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.pooler(lowerCAmelCase__ ) return embedding class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( 
nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = out_channels // reduction _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = input for layer in self.layers: _UpperCamelCase = layer(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCAmelCase__ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = ResNetConfig _snake_case : Union[str, Any] = 'resnet' _snake_case : Optional[int] = 'pixel_values' _snake_case : int = True def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' if isinstance(lowerCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = value lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder( lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config.num_labels _UpperCamelCase = ResNetModel(lowerCAmelCase__ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCAmelCase__ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) super()._init_backbone(lowerCAmelCase__ ) _UpperCamelCase = [config.embedding_size] + config.hidden_sizes _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput: '''simple 
docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
324
0
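# A minimal, self-contained sketch of the residual pattern implemented by the
# basic layer above: a shortcut branch (1x1 projection when the shape changes,
# identity otherwise) is summed with the conv branch before the final
# activation. This is an illustrative re-implementation, not the Hugging Face
# module itself; the class name and layer choices here are hypothetical.
import torch
from torch import nn


class TinyBasicBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
            if should_apply_shortcut
            else nn.Identity()
        )
        self.layer = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
            nn.ReLU(),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
        )
        self.activation = nn.ReLU()

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        residual = self.shortcut(hidden_state)
        return self.activation(self.layer(hidden_state) + residual)


# Quick shape check: stride 2 halves the spatial resolution on both branches.
assert TinyBasicBlock(3, 8, stride=2)(torch.randn(1, 3, 32, 32)).shape == (1, 8, 16, 16)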
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement(A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
30
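# Under the hood the skfuzzy helpers used above are element-wise NumPy
# operations; a minimal sketch reproducing a few of them without skfuzzy, on
# made-up membership values. Standard definitions: complement(A) = 1 - µA,
# bounded sum = min(1, µA + µB), bounded difference = max(0, µA - µB).
import numpy as np

mu_a = np.array([0.0, 0.3, 0.7, 1.0])  # hypothetical membership grades
mu_b = np.array([0.5, 0.5, 0.5, 0.5])

union = np.maximum(mu_a, mu_b)           # max(µA, µB)
intersection = np.minimum(mu_a, mu_b)    # min(µA, µB)
complement_a = 1.0 - mu_a
bounded_sum = np.minimum(1.0, mu_a + mu_b)
bounded_difference = np.maximum(0.0, mu_a - mu_b)

print(union, intersection, complement_a, bounded_sum, bounded_difference, sep="\n")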
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def a__ ( lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" if isinstance(lowercase, collections.abc.Iterable ): return x return (x, x) @require_flax class __lowerCAmelCase : """simple docstring""" def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple ) -> int: '''simple docstring''' pass def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' pass def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str: '''simple docstring''' _UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , 
lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = after_output[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple: '''simple docstring''' pt_model.to(lowerCAmelCase__ ) pt_model.eval() # prepare inputs _UpperCamelCase = inputs_dict _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple() _UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ ) _UpperCamelCase = 
fx_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ ) pt_model_loaded.to(lowerCAmelCase__ ) pt_model_loaded.eval() with torch.no_grad(): _UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 ) def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ ) _UpperCamelCase = fx_state self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase__ ) @is_pt_flax_cross_test def snake_case__ ( self : int ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = config_inputs_dict.pop('''vision_config''' ) _UpperCamelCase = config_inputs_dict.pop('''text_config''' ) _UpperCamelCase = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs() _UpperCamelCase = 
model_a(**lowerCAmelCase__ ) _UpperCamelCase = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model_a(**lowerCAmelCase__ ) _UpperCamelCase = after_outputs[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-5 ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxViTModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : str ) -> Tuple: '''simple docstring''' _UpperCamelCase = FlaxViTModelTester(self ) _UpperCamelCase = FlaxBertModelTester(self ) _UpperCamelCase = vit_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : List[str] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModelTester(self ) _UpperCamelCase = 
FlaxBertModelTester(self ) _UpperCamelCase = clip_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCamelCase = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' ) _UpperCamelCase = model(**lowerCAmelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _UpperCamelCase = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
324
0
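# The attention-shape assertion in the tests above relies on the ViT sequence
# length being the number of image patches plus one [CLS] token; a small
# worked version of that count for a square image.
def vit_seq_len(image_size: int, patch_size: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token


assert vit_seq_len(image_size=224, patch_size=16) == 197  # 14 * 14 + 1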
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
31
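# A usage sketch of the helper above; the repo id and file path are made up.
print(hf_hub_url("user/my_dataset", "data/train.csv", revision="main"))
# e.g. https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv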
'''simple docstring''' import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_choices def snake_case__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_attention_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self : Union[str, Any] ) -> str: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( 
FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxAlbertModelTester(self ) @slow def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0] _UpperCamelCase = (1, 11, 768) self.assertEqual(output.shape , lowerCAmelCase__ ) _UpperCamelCase = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
324
0
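# The slow Albert test above compares a small slice of the model output to
# hard-coded reference values within an absolute tolerance; a minimal sketch
# of that checking pattern with made-up numbers standing in for model output.
import numpy as np

output = np.zeros((1, 11, 768), dtype=np.float32)
output[0, 1:4, 1:4] = [[-0.6513, 1.5035, -0.2766],
                       [-0.6515, 1.5046, -0.2780],
                       [-0.6512, 1.5049, -0.2784]]
expected_slice = np.array([[[-0.6513, 1.5035, -0.2766],
                            [-0.6515, 1.5046, -0.2780],
                            [-0.6512, 1.5049, -0.2784]]])
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)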
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str=1_3 , SCREAMING_SNAKE_CASE__ : Optional[int]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=9_9 , SCREAMING_SNAKE_CASE__ : str=2_4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=6 , SCREAMING_SNAKE_CASE__ : Optional[int]=3_7 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=1_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Tuple=1_0_0_0 , ) -> str: a_ : Optional[Any] = parent a_ : List[str] = batch_size a_ : List[str] = seq_length a_ : str = is_training a_ : str = use_input_mask a_ : int = use_token_type_ids a_ : List[str] = use_labels a_ : Optional[int] = vocab_size a_ : Any = hidden_size a_ : int = num_hidden_layers a_ : List[str] = num_attention_heads a_ : str = intermediate_size a_ : Union[str, Any] = hidden_act a_ : List[str] = hidden_dropout_prob a_ : int = attention_probs_dropout_prob a_ : int = max_position_embeddings a_ : Tuple = type_vocab_size a_ : Optional[Any] = type_sequence_label_size a_ : Tuple = initializer_range a_ : Dict = num_labels a_ : str = scope a_ : Optional[int] = range_bbox def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a_ : int = bbox[i, j, 3] a_ : str = bbox[i, j, 1] a_ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: a_ : Tuple = bbox[i, j, 2] a_ : List[str] = bbox[i, j, 0] a_ : Union[str, Any] = t a_ : List[Any] = None if self.use_input_mask: a_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) a_ : List[Any] = None if self.use_token_type_ids: a_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) a_ : int = None a_ : Tuple = None if self.use_labels: a_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a_ : Optional[int] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE ( self : Dict ) -> int: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> str: a_ : Any = LiltModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : Any = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = model(SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> int: a_ : Any = self.num_labels a_ : str = LiltForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : str = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> str: a_ : Union[str, Any] = LiltForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() a_ : List[str] = model( SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: a_ : int = self.prepare_config_and_inputs() ( ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ( a_ ) , ) : List[Any] = config_and_inputs a_ : Optional[int] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): snake_case__ : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) snake_case__ : str = ( 
{ '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : List[str] = False snake_case__ : str = False def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ) -> int: return True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: a_ : str = LiltModelTester(self ) a_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: a_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a_ : List[str] = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: a_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) @slow def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a_ : List[Any] = LiltModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: a_ : List[str] = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(SCREAMING_SNAKE_CASE__ ) a_ : str = torch.tensor([[1, 2]] , device=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): a_ : str = model(input_ids=SCREAMING_SNAKE_CASE__ , bbox=SCREAMING_SNAKE_CASE__ ) a_ : Optional[int] = torch.Size([1, 2, 7_6_8] ) a_ : int = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=SCREAMING_SNAKE_CASE__ , ) self.assertTrue(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
32
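# The bbox fix-up loop in the LiLT tester above swaps coordinates so that
# x2 >= x1 and y2 >= y1 for every [x1, y1, x2, y2] box; a vectorized NumPy
# sketch of the same legalization.
import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(13, 7, 4))  # (batch, seq, 4), made-up sizes
x_lo = np.minimum(bbox[..., 0], bbox[..., 2])
x_hi = np.maximum(bbox[..., 0], bbox[..., 2])
y_lo = np.minimum(bbox[..., 1], bbox[..., 3])
y_hi = np.maximum(bbox[..., 1], bbox[..., 3])
bbox = np.stack([x_lo, y_lo, x_hi, y_hi], axis=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()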
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std def snake_case__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Tuple = LevitImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = LevitImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' pass def 
snake_case__ ( self : Dict ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
324
0
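# The LeViT processor above first resizes so the shortest edge reaches
# size["shortest_edge"] and then center-crops to crop_size; a small sketch of
# the resize arithmetic (the rounding convention is an assumption here, not
# read from the LeViT source).
def shortest_edge_resize(height, width, size):
    scale = size / min(height, width)
    return round(height * scale), round(width * scale)


assert shortest_edge_resize(30, 400, 18) == (18, 240)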
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def lowercase ( __snake_case : List[Any] ): lowercase_ : int = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase_ : Any = json.loads(open(__snake_case ).read() ) if not params: raise ValueError( F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('''.pt''' ): lowercase_ : Dict = args.output + '''.pt''' lowercase_ : Any = OrderedDict() with tf.device('''/CPU:0''' ): lowercase_ : int = tf.train.load_checkpoint(args.tf_model_dir ) lowercase_ : Optional[int] = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase_ : int = reader.get_tensor(__snake_case ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase_ : Optional[Any] = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase_ : Tuple = 8 lowercase_ : str = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[Any] = torch.tensor(__snake_case ) elif key_name.startswith('''model/moe''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase_ : List[str] = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Tuple = torch.tensor(__snake_case ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase_ : List[str] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = torch.tensor(__snake_case ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase_ : Union[str, Any] = key_name[-9:-7] for i in range(1_6 ): lowercase_ : Tuple = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase_ : str = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase_ : Tuple = torch.tensor(__snake_case ) elif key_name.startswith('''model/mlp''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase_ : List[Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Dict = torch.tensor(__snake_case ) elif key_name.endswith('''/p1/bias''' ): lowercase_ : Any = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase_ : Any = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/kernel''' ): lowercase_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase_ : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/p2/bias''' ): lowercase_ : Optional[int] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase_ : List[str] = vnp.copy() # same because it is 
one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.startswith('''model/ln''' ): lowercase_ : Optional[Any] = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase_ : str = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase_ : Any = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): lowercase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional lowercase_ : int = torch.tensor(__snake_case ) elif key_name.startswith('''model/att''' ): lowercase_ : Optional[int] = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase_ : Dict = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase_ : Tuple = state[:, 0, :, :] lowercase_ : Dict = state[:, 1, :, :] lowercase_ : Union[str, Any] = state[:, 2, :, :] lowercase_ : int = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[Any] = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Union[str, Any] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : List[str] = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase_ : str = torch.tensor(__snake_case ) lowercase_ : str = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase_ : Any = torch.tensor(__snake_case ) lowercase_ : List[Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase_ : Any = torch.tensor(__snake_case ) elif key_name.endswith('''/o/kernel''' ): lowercase_ : Dict = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase_ : Optional[int] = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase_ : Any = torch.tensor(__snake_case ) elif key_name.startswith('''model/an''' ): lowercase_ : str = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase_ : Dict = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase_ : Union[str, Any] = vnp.copy() # same because it is one dimensional lowercase_ : str = torch.tensor(__snake_case ) elif key_name.endswith('''/g''' ): lowercase_ : str = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase_ : Optional[Any] = vnp.copy() # same because it is one dimensional lowercase_ : Any = torch.tensor(__snake_case ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase_ : int = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase_ : str = '''model.%s.weight''' % nlayer lowercase_ : int = vnp.copy() # same in embedded lowercase_ : int = torch.tensor(__snake_case ) if key_name.startswith('''model/wte''' ): lowercase_ : Dict = '''lm_head.weight''' lowercase_ : Tuple = vnp.copy() # same in embedded lowercase_ : Dict = torch.tensor(__snake_case ) elif key_name.startswith('''model/wob''' ): lowercase_ : int = '''final_logits_bias''' lowercase_ : Any = vnp.copy() # same in 
embedded lowercase_ : Optional[Any] = state.reshape((1, -1) ) lowercase_ : Any = torch.tensor(__snake_case ) elif key_name == "model/dense/kernel": lowercase_ : List[str] = '''model.last_project.weight''' lowercase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase_ : Optional[Any] = torch.tensor(__snake_case ) elif key_name == "model/dense_1/bias": lowercase_ : Dict = '''model.last_project.bias''' lowercase_ : Tuple = vnp.copy() # same because it is one dimensional lowercase_ : Any = torch.tensor(__snake_case ) torch.save(__snake_case , args.output ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') __A : Any = parser.parse_args() convert_tf_gptsan_to_pt(args)
33
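# The converter above flattens TF attention kernels of shape
# (d_model, heads, head_dim) into PyTorch Linear weights of shape (out, in)
# via reshape + transpose; a minimal NumPy sketch of that move with toy sizes.
import numpy as np

d_model, heads, head_dim = 8, 2, 4  # hypothetical dimensions
tf_kernel = np.arange(d_model * heads * head_dim, dtype=np.float32).reshape(
    d_model, heads, head_dim
)
pt_weight = tf_kernel.reshape(d_model, heads * head_dim).transpose(1, 0).copy()
assert pt_weight.shape == (heads * head_dim, d_model)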
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
324
0
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
34
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : str = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def a__ ( lowercase : str ) -> Dict: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase = k.replace(lowercase, lowercase ) if k.startswith('''encoder''' ): _UpperCamelCase = k.replace('''.attn''', '''.self_attn''' ) _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''encoder_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm3''', '''final_layer_norm''' ) return k def a__ ( lowercase : List[str] ) -> List[Any]: """simple docstring""" _UpperCamelCase = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _UpperCamelCase = sd.pop(lowercase ) _UpperCamelCase = k.replace('''layernorm_embedding''', '''layer_norm''' ) assert new_k not in sd _UpperCamelCase = v lowercase__ : str = ['START'] @torch.no_grad() def a__ ( lowercase : Optional[int], lowercase : List[str], lowercase : List[str] ) -> Dict: """simple docstring""" _UpperCamelCase = torch.load(lowercase, map_location='''cpu''' ) _UpperCamelCase = model['''model'''] _UpperCamelCase = BlenderbotConfig.from_json_file(lowercase ) _UpperCamelCase = BlenderbotForConditionalGeneration(lowercase ) _UpperCamelCase = m.model.state_dict().keys() _UpperCamelCase = [] _UpperCamelCase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase = rename_state_dict_key(lowercase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowercase ) m.model.load_state_dict(lowercase, strict=lowercase ) m.half() m.save_pretrained(lowercase ) if __name__ == "__main__": lowercase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) lowercase__ : Optional[Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
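# Illustrative sketch of the ordered substring renaming the conversion
# script above applies to ParlAI state-dict keys; only a few PATTERNS pairs
# are reproduced here and the helper name rename_key is an assumption.
PATTERNS = [('attention', 'attn'), ('q_lin', 'q_proj'), ('out_lin', 'out_proj')]

def rename_key(k: str) -> str:
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('encoder'):
        # Encoder layers only have self-attention, as in the script.
        k = k.replace('.attn', '.self_attn').replace('norm1', 'self_attn_layer_norm')
    return k

assert rename_key('encoder.layers.0.attention.q_lin.weight') == 'encoder.layers.0.self_attn.q_proj.weight'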
324
0
'''simple docstring''' from pickle import UnpicklingError import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict from ..utils import logging __a = logging.get_logger(__name__) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: try: with open(_lowerCAmelCase , """rb""" ) as flax_state_f: snake_case__ : Any = from_bytes(_lowerCAmelCase , flax_state_f.read() ) except UnpicklingError as e: try: with open(_lowerCAmelCase ) as f: if f.read().startswith("""version""" ): raise OSError( """You seem to have cloned a repository without having git-lfs installed. Please""" """ install git-lfs and run `git lfs install` followed by `git lfs pull` in the""" """ folder you cloned.""" ) else: raise ValueError from e except (UnicodeDecodeError, ValueError): raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase ) def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int: try: import torch # noqa: F401 except ImportError: logger.error( """Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see""" """ https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation""" """ instructions.""" ) raise # check if we have bf16 weights snake_case__ : Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda _lowerCAmelCase : x.dtype == jnp.bfloataa , _lowerCAmelCase ) ).values() if any(_lowerCAmelCase ): # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( """Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` """ """before loading those in PyTorch model.""" ) snake_case__ : Optional[Any] = jax.tree_util.tree_map( lambda _lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCAmelCase ) snake_case__ : Optional[int] = """""" snake_case__ : Any = flatten_dict(_lowerCAmelCase , sep=""".""" ) snake_case__ : Union[str, Any] = pt_model.state_dict() # keep track of unexpected & missing keys snake_case__ : Any = [] snake_case__ : List[Any] = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): snake_case__ : str = flax_key_tuple.split(""".""" ) if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4: snake_case__ : Dict = flax_key_tuple_array[:-1] + ["""weight"""] snake_case__ : List[Any] = jnp.transpose(_lowerCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple_array[-1] == "kernel": snake_case__ : str = flax_key_tuple_array[:-1] + ["""weight"""] snake_case__ : Dict = flax_tensor.T elif flax_key_tuple_array[-1] == "scale": snake_case__ : Dict = flax_key_tuple_array[:-1] + ["""weight"""] if "time_embedding" not in flax_key_tuple_array: for i, flax_key_tuple_string in enumerate(_lowerCAmelCase ): snake_case__ : int = ( flax_key_tuple_string.replace("""_0""" , """.0""" ) .replace("""_1""" , """.1""" ) .replace("""_2""" , """.2""" ) .replace("""_3""" , """.3""" ) .replace("""_4""" , """.4""" ) .replace("""_5""" , """.5""" ) .replace("""_6""" , """.6""" ) .replace("""_7""" , """.7""" ) .replace("""_8""" , """.8""" ) .replace("""_9""" , """.9""" ) ) snake_case__ : List[Any] = """.""".join(_lowerCAmelCase ) if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict snake_case__ : Tuple = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase , np.ndarray ) else flax_tensor snake_case__ : Optional[int] = torch.from_numpy(_lowerCAmelCase ) # remove from missing keys missing_keys.remove(_lowerCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_lowerCAmelCase ) pt_model.load_state_dict(_lowerCAmelCase ) # re-transform missing_keys to list snake_case__ : Tuple = list(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: logger.warning( """Some weights of the Flax model were not used when initializing the PyTorch model""" f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" """ (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This""" f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" """ to be exactly identical (e.g. initializing a BertForSequenceClassification model from a""" """ FlaxBertForSequenceClassification model).""" ) if len(_lowerCAmelCase ) > 0: logger.warning( f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" """ use it for predictions and inference.""" ) return pt_model
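# Illustrative check of the two layout conversions performed above: Flax
# conv kernels are (H, W, in, out) while PyTorch expects (out, in, H, W),
# and Flax dense kernels are the transpose of PyTorch linear weights.
# Shapes below are made up for the example.
import numpy as np

flax_conv = np.zeros((3, 3, 16, 32))             # (H, W, in, out)
pt_conv = np.transpose(flax_conv, (3, 2, 0, 1))  # same permutation as above
assert pt_conv.shape == (32, 16, 3, 3)           # (out, in, H, W)

flax_dense = np.zeros((128, 64))                 # (in, out)
assert flax_dense.T.shape == (64, 128)           # PyTorch (out, in)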
35
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : Tuple = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Tuple = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowercase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
324
0
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _snake_case = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def A ( _lowerCamelCase ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(_lowerCamelCase ) def A ( _lowerCamelCase ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main _lowerCAmelCase : int = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(_lowerCamelCase , id=_lowerCamelCase )
36
'''simple docstring''' import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging lowercase__ : Any = logging.get_logger(__name__) class __lowerCAmelCase : """simple docstring""" _snake_case : List[str] = None @experimental def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int: """simple docstring""" if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]: """simple docstring""" _UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase ) _UpperCamelCase = [] # We organize the splits ourselves (contiguous splits) for index in range(lowercase ): _UpperCamelCase = len(lowercase ) // num_proc _UpperCamelCase = len(lowercase ) % num_proc _UpperCamelCase = div * index + min(lowercase, lowercase ) _UpperCamelCase = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( F"""Error dividing inputs iterable among processes. """ F"""Total number of objects {len(lowercase )}, """ F"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) _UpperCamelCase , _UpperCamelCase = None, None if not disable_tqdm: _UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool: _UpperCamelCase = pool.map(lowercase, lowercase ) logger.info(F"""Finished {num_proc} processes""" ) _UpperCamelCase = [obj for proc_res in mapped for obj in proc_res] logger.info(F"""Unpacked {len(lowercase )} objects""" ) return mapped def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any: """simple docstring""" import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ): return joblib.Parallel()( joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def a__ ( lowercase : str ) -> Optional[int]: """simple docstring""" _UpperCamelCase = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: _UpperCamelCase = None
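# Worked example of the contiguous-split arithmetic used above: every
# slice gets len(iterable) // num_proc items and the first
# len(iterable) % num_proc slices get one extra. split_bounds is an
# illustrative name, not a helper from the original file.
def split_bounds(n_items: int, num_proc: int) -> list[tuple[int, int]]:
    div, mod = divmod(n_items, num_proc)
    return [
        (div * index + min(index, mod), div * (index + 1) + min(index + 1, mod))
        for index in range(num_proc)
    ]

# 10 items over 3 processes -> contiguous slices of length 4, 3, 3.
assert split_bounds(10, 3) == [(0, 4), (4, 7), (7, 10)]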
324
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device _lowerCAmelCase = False class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class lowerCAmelCase_( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase_ ( self ) -> Optional[Any]: lowerCAmelCase__ : Optional[int] = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) lowerCAmelCase__ : Tuple = torch.manual_seed(0 ) lowerCAmelCase__ : Dict = pipe( image=__UpperCAmelCase ,generator=__UpperCAmelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type="""numpy""" ,).images lowerCAmelCase__ : List[Any] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ : Tuple = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
37
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[Any]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=1 / 255 , lowerCAmelCase__ : Tuple=True , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> str: '''simple docstring''' if not batched: _UpperCamelCase = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): _UpperCamelCase , _UpperCamelCase = image.size else: _UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2] if w < h: _UpperCamelCase = int(self.size['''shortest_edge'''] * h / w ) _UpperCamelCase = self.size['''shortest_edge'''] elif w > h: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = self.size['''shortest_edge'''] else: _UpperCamelCase = [] for image in image_inputs: _UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = DeformableDetrImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' 
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) _UpperCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' pass def snake_case__ ( self : int ) -> Any: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : str ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) _UpperCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them _UpperCamelCase = DeformableDetrImageProcessor() _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) ) @slow def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) 
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} _UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify masks _UpperCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
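# Worked example of the aspect-preserving resize rule the tester above
# encodes: the shorter image side is scaled to size['shortest_edge'] and
# the longer side follows proportionally. expected_size is an illustrative
# name for this sketch.
def expected_size(w: int, h: int, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert expected_size(30, 400) == (240, 18)  # portrait input
assert expected_size(400, 30) == (18, 240)  # landscape input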
324
0
from collections.abc import Generator from math import sin def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes: """simple docstring""" if len(__magic_name__ ) != 32: raise ValueError("""Input must be of length 32""" ) UpperCamelCase :int = B"""""" for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> bytes: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) UpperCamelCase :Any = format(__magic_name__ , """08x""" )[-8:] UpperCamelCase :Union[str, Any] = B"""""" for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""" ) return little_endian_hex def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes: """simple docstring""" UpperCamelCase :str = B"""""" for char in message: bit_string += format(__magic_name__ , """08b""" ).encode("""utf-8""" ) UpperCamelCase :Any = format(len(__magic_name__ ) , """064b""" ).encode("""utf-8""" ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__magic_name__ ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> Generator[list[int], None, None]: """simple docstring""" if len(__magic_name__ ) % 512 != 0: raise ValueError("""Input must have length that's a multiple of 512""" ) for pos in range(0 , len(__magic_name__ ) , 512 ): UpperCamelCase :Tuple = bit_string[pos : pos + 512] UpperCamelCase :Optional[int] = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> int: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) UpperCamelCase :List[str] = format(__magic_name__ , """032b""" ) UpperCamelCase :Any = """""" for c in i_str: new_str += "1" if c == "0" else "0" return int(__magic_name__ , 2 ) def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" return (a + b) % 2**32 def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" if i < 0: raise ValueError("""Input must be non-negative""" ) if shift < 0: raise ValueError("""Shift must be non-negative""" ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def SCREAMING_SNAKE_CASE_ ( __magic_name__ : bytes ) -> bytes: """simple docstring""" UpperCamelCase :Tuple = preprocess(__magic_name__ ) UpperCamelCase :List[str] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states UpperCamelCase :Union[str, Any] = 0X67_45_23_01 UpperCamelCase :Union[str, Any] = 0XEF_CD_AB_89 UpperCamelCase :List[str] = 0X98_BA_DC_FE UpperCamelCase :int = 0X10_32_54_76 UpperCamelCase :int = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__magic_name__ ): UpperCamelCase :Optional[Any] = aa UpperCamelCase :Any = ba UpperCamelCase :Tuple = ca UpperCamelCase :List[str] = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f UpperCamelCase :int = d ^ (b & (c ^ d)) UpperCamelCase :Optional[int] = i elif i <= 31: 
# f = (d & b) | (not_32(d) & c) # Alternate definition for f UpperCamelCase :str = c ^ (d & (b ^ c)) UpperCamelCase :Union[str, Any] = (5 * i + 1) % 16 elif i <= 47: UpperCamelCase :str = b ^ c ^ d UpperCamelCase :Optional[int] = (3 * i + 5) % 16 else: UpperCamelCase :List[str] = c ^ (b | not_aa(__magic_name__ )) UpperCamelCase :int = (7 * i) % 16 UpperCamelCase :Dict = (f + a + added_consts[i] + block_words[g]) % 2**32 UpperCamelCase :Tuple = d UpperCamelCase :str = c UpperCamelCase :Tuple = b UpperCamelCase :Optional[Any] = sum_aa(__magic_name__ , left_rotate_aa(__magic_name__ , shift_amounts[i] ) ) # Add hashed chunk to running total UpperCamelCase :List[str] = sum_aa(__magic_name__ , __magic_name__ ) UpperCamelCase :str = sum_aa(__magic_name__ , __magic_name__ ) UpperCamelCase :int = sum_aa(__magic_name__ , __magic_name__ ) UpperCamelCase :Optional[Any] = sum_aa(__magic_name__ , __magic_name__ ) UpperCamelCase :Optional[Any] = reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) + reformat_hex(__magic_name__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
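# The file above is a from-scratch MD5; hashlib computes the same digest,
# which makes a convenient cross-check when modifying the implementation.
# This check is illustrative and not part of the original file.
import hashlib

assert hashlib.md5(b'hello world').hexdigest() == '5eb63bbbe01eeed093cb22bb8f5acdc3'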
38
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase__ : str = None lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase__ : int = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase__ : Optional[int] = { 'google/rembert': 2_56, } lowercase__ : str = '▁' class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : str = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Dict = RemBertTokenizer def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]="[CLS]" , lowerCAmelCase__ : str="[SEP]" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : Optional[int]="[SEP]" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : str="[CLS]" , lowerCAmelCase__ : List[Any]="[MASK]" , **lowerCAmelCase__ : List[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) _UpperCamelCase = do_lower_case _UpperCamelCase = remove_space _UpperCamelCase = keep_accents _UpperCamelCase = vocab_file _UpperCamelCase = False if not self.vocab_file else True def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple 
docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
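# Sketch of the BERT/RemBERT pair layout built above: [CLS] A [SEP] B [SEP],
# with token_type_ids 0 over the first segment and 1 over the second.
# Token ids below are made up for the example.
cls_id, sep_id = 101, 102
ids_a, ids_b = [7, 8], [9]

pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert pair == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]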
324
0
from __future__ import annotations import collections import pprint from pathlib import Path def __A ( __lowerCAmelCase )-> str: """simple docstring""" return "".join(sorted(__lowerCAmelCase ) ) def __A ( __lowerCAmelCase )-> list[str]: """simple docstring""" return word_by_signature[signature(__lowerCAmelCase )] _a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') _a = sorted({word.strip().lower() for word in data.splitlines()}) _a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": _a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
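# Tiny demonstration of the signature trick above: two words are anagrams
# exactly when their sorted letters match, so the sorted string works as a
# grouping key. Words and names here are illustrative.
import collections

groups = collections.defaultdict(list)
for word in ['listen', 'silent', 'enlist', 'google']:
    groups[''.join(sorted(word))].append(word)

assert groups['eilnst'] == ['listen', 'silent', 'enlist']
assert groups['eggloo'] == ['google']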
39
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : str = logging.get_logger(__name__) lowercase__ : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = 'deformable_detr' _snake_case : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = backbone_config.get('''model_type''' ) _UpperCamelCase = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase = config_class.from_dict(lowerCAmelCase__ ) _UpperCamelCase = use_timm_backbone _UpperCamelCase = backbone_config _UpperCamelCase = num_channels _UpperCamelCase = num_queries _UpperCamelCase = max_position_embeddings _UpperCamelCase = d_model _UpperCamelCase = encoder_ffn_dim _UpperCamelCase = encoder_layers _UpperCamelCase = encoder_attention_heads _UpperCamelCase = decoder_ffn_dim _UpperCamelCase = decoder_layers _UpperCamelCase = decoder_attention_heads _UpperCamelCase = dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = activation_function _UpperCamelCase = init_std _UpperCamelCase = init_xavier_std _UpperCamelCase = encoder_layerdrop _UpperCamelCase = auxiliary_loss _UpperCamelCase = position_embedding_type _UpperCamelCase = backbone _UpperCamelCase = use_pretrained_backbone _UpperCamelCase = dilation # deformable attributes _UpperCamelCase = num_feature_levels _UpperCamelCase = encoder_n_points _UpperCamelCase = decoder_n_points _UpperCamelCase = two_stage _UpperCamelCase = two_stage_num_proposals _UpperCamelCase = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _UpperCamelCase = class_cost _UpperCamelCase = bbox_cost _UpperCamelCase = giou_cost # Loss coefficients _UpperCamelCase = mask_loss_coefficient _UpperCamelCase = dice_loss_coefficient _UpperCamelCase = bbox_loss_coefficient _UpperCamelCase = giou_loss_coefficient _UpperCamelCase = eos_coefficient _UpperCamelCase = focal_alpha _UpperCamelCase = disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def snake_case__ ( self : int ) -> int: '''simple docstring''' return self.d_model def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
324
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = {"""vocab_file""": """sentencepiece.bpe.model"""} __lowercase = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } __lowercase = { """camembert-base""": 512, } __lowercase = """▁""" class _A ( _a ): """simple docstring""" UpperCAmelCase : List[str] = VOCAB_FILES_NAMES UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : List[Any]="<s>" , __UpperCAmelCase : Optional[int]="<unk>" , __UpperCAmelCase : Any="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it a : Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__UpperCAmelCase)) a : List[Any] = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> a : Optional[int] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3} a : int = len(self.fairseq_tokens_to_ids) a : Union[str, Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids) a : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a : int = [self.cls_token_id] a : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase)) + [1] return [1] + ([0] * len(__UpperCAmelCase)) + [1, 1] + ([0] * len(__UpperCAmelCase)) + [1] def __snake_case ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None): a : List[str] = [self.sep_token_id] a : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] 
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def __snake_case ( self : str): return len(self.fairseq_tokens_to_ids) + len(self.sp_model) def __snake_case ( self : Union[str, Any]): a : Any = {self.convert_ids_to_tokens(__UpperCAmelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __snake_case ( self : str , __UpperCAmelCase : str): return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : Dict): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(__UpperCAmelCase) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(__UpperCAmelCase) def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Tuple): a : int = [] a : Dict = "" a : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCAmelCase) + token a : str = True a : str = [] else: current_sub_tokens.append(__UpperCAmelCase) a : str = False out_string += self.sp_model.decode(__UpperCAmelCase) return out_string.strip() def __getstate__( self : Tuple): a : Any = self.__dict__.copy() a : Optional[Any] = None return state def __setstate__( self : Optional[int] , __UpperCAmelCase : List[str]): a : Dict = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): a : Any = {} a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None): if not os.path.isdir(__UpperCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return a : Optional[Any] = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __UpperCAmelCase) elif not os.path.isfile(self.vocab_file): with open(__UpperCAmelCase , "wb") as fi: a : List[Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase) return (out_vocab_file,)
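# Sketch of the special-token layouts the tokenizer methods above build
# (the RoBERTa/CamemBERT convention): <s> A </s> for one sequence and
# <s> A </s></s> B </s> for a pair. Token ids below are made up.
cls_id, sep_id = 5, 6
ids_a, ids_b = [10, 11], [20]

single = [cls_id] + ids_a + [sep_id]
pair = [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]
assert single == [5, 10, 11, 6]
assert pair == [5, 10, 11, 6, 6, 20, 6]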
40
'''simple docstring''' from __future__ import annotations def a__ ( lowercase : str, lowercase : list[str] | None = None, lowercase : dict[str, float] | None = None, lowercase : bool = False, ) -> tuple[int, float, str]: """simple docstring""" _UpperCamelCase = cipher_alphabet or [chr(lowercase ) for i in range(97, 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the English language (how much they show up) _UpperCamelCase = { '''a''': 0.0_8_4_9_7, '''b''': 0.0_1_4_9_2, '''c''': 0.0_2_2_0_2, '''d''': 0.0_4_2_5_3, '''e''': 0.1_1_1_6_2, '''f''': 0.0_2_2_2_8, '''g''': 0.0_2_0_1_5, '''h''': 0.0_6_0_9_4, '''i''': 0.0_7_5_4_6, '''j''': 0.0_0_1_5_3, '''k''': 0.0_1_2_9_2, '''l''': 0.0_4_0_2_5, '''m''': 0.0_2_4_0_6, '''n''': 0.0_6_7_4_9, '''o''': 0.0_7_5_0_7, '''p''': 0.0_1_9_2_9, '''q''': 0.0_0_0_9_5, '''r''': 0.0_7_5_8_7, '''s''': 0.0_6_3_2_7, '''t''': 0.0_9_3_5_6, '''u''': 0.0_2_7_5_8, '''v''': 0.0_0_9_7_8, '''w''': 0.0_2_5_6_0, '''x''': 0.0_0_1_5_0, '''y''': 0.0_1_9_9_4, '''z''': 0.0_0_0_7_7, } else: # Custom frequencies dictionary _UpperCamelCase = frequencies_dict if not case_sensitive: _UpperCamelCase = ciphertext.lower() # Chi squared statistic values _UpperCamelCase = {} # cycle through all of the shifts for shift in range(len(lowercase ) ): _UpperCamelCase = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet _UpperCamelCase = (alphabet_letters.index(letter.lower() ) - shift) % len( lowercase ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter _UpperCamelCase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: _UpperCamelCase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message _UpperCamelCase = decrypted_with_shift.lower().count(lowercase ) # Get the expected amount of times the letter should appear based # on letter frequencies _UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message _UpperCamelCase = decrypted_with_shift.count(lowercase ) # Get the expected amount of times the letter should appear based # on letter frequencies _UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary _UpperCamelCase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(lowercase : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] _UpperCamelCase = min( lowercase, key=lowercase, ) # Get all the data from the most likely cipher (key, decoded message) ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = chi_squared_statistic_values[most_likely_cipher] #
Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
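# Worked example of the two ingredients above: a Caesar shift over the
# lowercase alphabet, and the chi-squared fit (observed - expected)^2 /
# expected summed over letters. Numbers are hand-picked so the arithmetic
# is exact; shift_by is an illustrative name.
def shift_by(text: str, k: int) -> str:
    return ''.join(chr((ord(c) - 97 + k) % 26 + 97) for c in text)

assert shift_by('wkluwhhq', -3) == 'thirteen'  # undoing a shift of 3

observed = {'e': 4, 't': 1}
expected = {'e': 2.0, 't': 2.0}
chi2 = sum((observed[c] - expected[c]) ** 2 / expected[c] for c in observed)
assert chi2 == 2.5  # (4-2)^2/2 + (1-2)^2/2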
324
0
'''simple docstring''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> float: if digit_amount > 0: return round(number - int(UpperCamelCase ) , UpperCamelCase ) return number - int(UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
41
'''simple docstring''' import math def a__ ( lowercase : list, lowercase : int = 0, lowercase : int = 0 ) -> list: """simple docstring""" _UpperCamelCase = end or len(lowercase ) for i in range(lowercase, lowercase ): _UpperCamelCase = i _UpperCamelCase = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _UpperCamelCase = array[temp_index - 1] temp_index -= 1 _UpperCamelCase = temp_index_value return array def a__ ( lowercase : list, lowercase : int, lowercase : int ) -> None: # Max Heap """simple docstring""" _UpperCamelCase = index _UpperCamelCase = 2 * index + 1 # Left Node _UpperCamelCase = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _UpperCamelCase = left_index if right_index < heap_size and array[largest] < array[right_index]: _UpperCamelCase = right_index if largest != index: _UpperCamelCase , _UpperCamelCase = array[largest], array[index] heapify(lowercase, lowercase, lowercase ) def a__ ( lowercase : list ) -> list: """simple docstring""" _UpperCamelCase = len(lowercase ) for i in range(n // 2, -1, -1 ): heapify(lowercase, lowercase, lowercase ) for i in range(n - 1, 0, -1 ): _UpperCamelCase , _UpperCamelCase = array[0], array[i] heapify(lowercase, 0, lowercase ) return array def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int ) -> int: """simple docstring""" _UpperCamelCase = low _UpperCamelCase = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _UpperCamelCase , _UpperCamelCase = array[j], array[i] i += 1 def a__ ( lowercase : list ) -> list: """simple docstring""" if len(lowercase ) == 0: return array _UpperCamelCase = 2 * math.ceil(math.loga(len(lowercase ) ) ) _UpperCamelCase = 16 return intro_sort(lowercase, 0, len(lowercase ), lowercase, lowercase ) def a__ ( lowercase : list, lowercase : int, lowercase : int, lowercase : int, lowercase : int ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(lowercase ) max_depth -= 1 _UpperCamelCase = median_of_a(lowercase, lowercase, start + ((end - start) // 2) + 1, end - 1 ) _UpperCamelCase = partition(lowercase, lowercase, lowercase, lowercase ) intro_sort(lowercase, lowercase, lowercase, lowercase, lowercase ) _UpperCamelCase = p return insertion_sort(lowercase, lowercase, lowercase ) if __name__ == "__main__": import doctest doctest.testmod() lowercase__ : Any = input('Enter numbers separated by a comma : ').strip() lowercase__ : Any = [float(item) for item in user_input.split(',')] print(sort(unsorted))
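# The introsort driver above limits quicksort recursion to
# 2 * ceil(log2(n)) before switching to heap sort, and hands slices
# shorter than 16 elements to insertion sort. A quick check of that depth
# bound (max_depth is an illustrative name):
import math

def max_depth(n: int) -> int:
    return 2 * math.ceil(math.log2(n))

assert max_depth(16) == 8     # 2 * log2(16)
assert max_depth(1000) == 20  # 2 * ceil(9.97...)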
324
0
'''simple docstring''' import math def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float: if ( not isinstance(__A , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * power_factor def SCREAMING_SNAKE_CASE__ ( __A , __A ) -> float: if ( not isinstance(__A , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('power_factor must be a valid float value between -1 and 1.' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
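# Worked power-triangle example for the helpers above: with apparent power
# S and power factor pf, real power is S * pf and reactive power is
# S * sqrt(1 - pf^2), a 3-4-5-style right triangle at pf = 0.8.
import math

S, pf = 100.0, 0.8
real = S * pf
reactive = S * math.sqrt(1 - pf**2)
assert real == 80.0
assert round(reactive, 6) == 60.0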
42
'''simple docstring''' import os import numpy import onnx def a__ ( lowercase : List[str], lowercase : str ) -> List[Any]: """simple docstring""" _UpperCamelCase = a.name _UpperCamelCase = b.name _UpperCamelCase = '''''' _UpperCamelCase = '''''' _UpperCamelCase = a == b _UpperCamelCase = name_a _UpperCamelCase = name_b return res def a__ ( lowercase : List[str], lowercase : List[Any], lowercase : Tuple ) -> int: """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowercase, lowercase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g, lowercase, lowercase ) _graph_replace_input_with(node_proto.attribute[1].g, lowercase, lowercase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g, lowercase, lowercase ) def a__ ( lowercase : Any, lowercase : Union[str, Any], lowercase : Dict ) -> Tuple: """simple docstring""" for n in graph_proto.node: _node_replace_input_with(lowercase, lowercase, lowercase ) def a__ ( lowercase : Optional[int], lowercase : Union[str, Any], lowercase : Optional[int] ) -> Tuple: """simple docstring""" _UpperCamelCase = list(model.graph.initializer ) _UpperCamelCase = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _UpperCamelCase = inits[i].name _UpperCamelCase = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph, lowercase, lowercase ) def a__ ( lowercase : Dict ) -> Dict: """simple docstring""" _UpperCamelCase = os.path.dirname(lowercase ) _UpperCamelCase = os.path.basename(lowercase ) _UpperCamelCase = onnx.load(os.path.join(lowercase, lowercase ) ) _UpperCamelCase = list(model.graph.initializer ) _UpperCamelCase = set() _UpperCamelCase = {} _UpperCamelCase = [] _UpperCamelCase = 0 for i in range(len(lowercase ) ): if i in dup_set: continue for j in range(i + 1, len(lowercase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i], inits[j] ): dup_set.add(lowercase ) dup_set.add(lowercase ) _UpperCamelCase = inits[j].data_type _UpperCamelCase = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''', lowercase ) total_reduced_size += mem_size _UpperCamelCase = inits[i].name _UpperCamelCase = inits[j].name if name_i in dup_map: dup_map[name_i].append(lowercase ) else: _UpperCamelCase = [name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''', total_reduced_size / 1024 / 1024 / 1024, '''GB''' ) _UpperCamelCase = sorted(lowercase ) _remove_dup_initializers_from_model(lowercase, lowercase, lowercase ) _UpperCamelCase = '''optimized_''' + model_file_name _UpperCamelCase = os.path.join(lowercase, lowercase ) onnx.save(lowercase, lowercase ) return new_model
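# Sketch of the duplicate-initializer detection above, using raw tensor
# bytes as a grouping key instead of the script's O(n^2) pairwise
# comparison (dtype/shape checks omitted for brevity). Names and arrays
# are illustrative.
import numpy as np

tensors = {'a': np.arange(4), 'b': np.arange(4), 'c': np.ones(4)}
seen: dict[bytes, str] = {}
dup_map: dict[str, list[str]] = {}
for name, arr in tensors.items():
    key = arr.tobytes()
    if key in seen:
        dup_map.setdefault(seen[key], []).append(name)  # kept name -> duplicates
    else:
        seen[key] = name

assert dup_map == {'a': ['b']}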
324
0
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration __lowercase = 5_0000 __lowercase = 5000 __lowercase , __lowercase = os.path.split(__file__) __lowercase = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for i in range(SCREAMING_SNAKE_CASE ): __UpperCamelCase :int = dataset[i] @get_duration def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ): __UpperCamelCase :str = dataset[i : i + batch_size] @get_duration def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' with dataset.formatted_as(type=SCREAMING_SNAKE_CASE ): for i in range(SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[str] = dataset[i] @get_duration def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' with dataset.formatted_as(type=SCREAMING_SNAKE_CASE ): for i in range(0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __UpperCamelCase :List[Any] = dataset[i : i + batch_size] def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Optional[Any] = {'''num examples''': SPEED_TEST_N_EXAMPLES} __UpperCamelCase :Optional[Any] = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}), (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}), ] __UpperCamelCase :List[str] = [ (read, {'''length''': SMALL_TEST}), (read, {'''length''': SPEED_TEST_N_EXAMPLES}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}), (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}), (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}), (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('''generating dataset''' ) __UpperCamelCase :Any = datasets.Features( {'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} ) __UpperCamelCase :int = generate_example_dataset( os.path.join(SCREAMING_SNAKE_CASE , '''dataset.arrow''' ) , SCREAMING_SNAKE_CASE , num_examples=SCREAMING_SNAKE_CASE , seq_shapes={'''list''': (100,)} , ) print('''first set of iterations''' ) for func, kwargs in functions: print(func.__name__ , str(SCREAMING_SNAKE_CASE ) ) __UpperCamelCase :Union[str, Any] = func(SCREAMING_SNAKE_CASE , 
**SCREAMING_SNAKE_CASE ) print('''shuffling dataset''' ) __UpperCamelCase :Union[str, Any] = dataset.shuffle() print('''second set of iterations (after shuffling)''' ) for func, kwargs in functions_shuffled: print('''shuffled ''' , func.__name__ , str(SCREAMING_SNAKE_CASE ) ) __UpperCamelCase :List[Any] = func( SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , '''wb''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE ).encode('''utf-8''' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
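This benchmark decorates every read function with `get_duration` from a local `utils` module whose definition is not shown here. A plausible minimal implementation, assuming it simply returns the wall-clock time of the wrapped call:

import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed seconds
    return wrapper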
43
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase__ : List[Any] = 25_00_04 lowercase__ : str = 25_00_20 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MBartTokenizer _snake_case : Tuple = MBartTokenizerFast _snake_case : List[str] = True _snake_case : Optional[Any] = True def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = 
tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : Dict = 'facebook/mbart-large-en-ro' _snake_case : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _snake_case : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod 
def snake_case__ ( cls : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _UpperCamelCase = 1 return cls def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def snake_case__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , lowerCAmelCase__ ) _UpperCamelCase = 10 _UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ ) @require_torch def snake_case__ ( self : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , 
lowerCAmelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' ) _UpperCamelCase = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' ) _UpperCamelCase = targets['''input_ids'''] _UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def snake_case__ ( self : Tuple ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 250004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, } , )
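The save/reload assertions in the test above follow a standard round-trip pattern: serialize the tokenizer to a temporary directory, load it back, and check that nothing was lost. Stripped to its essentials with the public `transformers` API (the checkpoint is the one the slow tests use; downloading it requires network access):

import tempfile

from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro')
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir)
    reloaded = MBartTokenizer.from_pretrained(tmp_dir)
assert tokenizer.get_vocab() == reloaded.get_vocab()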
324
0
"""simple docstring""" from math import ceil def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> int: _lowerCAmelCase : Dict = list(range(0 ,_lowerCamelCase ) ) _lowerCAmelCase : Tuple = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check _lowerCAmelCase : Union[str, Any] = [] for i in device_map_blocks: if device_map_blocks.count(_lowerCamelCase ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(_lowerCamelCase ) # Missing blocks _lowerCAmelCase : int = [i for i in blocks if i not in device_map_blocks] _lowerCAmelCase : List[Any] = [i for i in device_map_blocks if i not in blocks] if len(_lowerCamelCase ) != 0: raise ValueError( """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.""" """ These attention blocks were specified more than once: """ + str(_lowerCamelCase ) ) if len(_lowerCamelCase ) != 0: raise ValueError( """There are attention blocks for this model that are not specified in the device_map. Add these attention """ """blocks to a device on the device_map: """ + str(_lowerCamelCase ) ) if len(_lowerCamelCase ) != 0: raise ValueError( """The device_map contains more attention blocks than this model has. Remove these from the device_map:""" + str(_lowerCamelCase ) ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : Tuple ) -> str: _lowerCAmelCase : Optional[Any] = list(range(_lowerCamelCase ) ) _lowerCAmelCase : Optional[Any] = int(ceil(n_layers / len(_lowerCamelCase ) ) ) _lowerCAmelCase : Optional[int] = [layers[i : i + n_blocks] for i in range(0 ,_lowerCamelCase ,_lowerCamelCase )] return dict(zip(_lowerCamelCase ,_lowerCamelCase ) )
44
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase__ : str = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Union[str, Any] = ['pixel_values'] def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 256} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ ) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray: '''simple docstring''' return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]: '''simple docstring''' _UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase__ ): _UpperCamelCase = target_sizes.numpy() _UpperCamelCase = [] for idx in range(len(lowerCAmelCase__ ) ): _UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ ) _UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase__ ) else: _UpperCamelCase = logits.argmax(dim=1 ) _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
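The preprocessing above always runs in the same order: resize to a shortest edge, center-crop to a fixed height/width, rescale (typically by 1/255), then per-channel normalization. The last two steps are plain array arithmetic; a minimal numpy sketch using the `IMAGENET_STANDARD_MEAN`/`IMAGENET_STANDARD_STD` constants imported above (the 0.5 "standard" values, as opposed to the classic 0.485/0.456/0.406 defaults):

import numpy as np

IMAGENET_STANDARD_MEAN = np.array([0.5, 0.5, 0.5], dtype=np.float32)
IMAGENET_STANDARD_STD = np.array([0.5, 0.5, 0.5], dtype=np.float32)

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)  # HWC, uint8 range
image = image * (1 / 255)                                         # rescale to [0, 1]
image = (image - IMAGENET_STANDARD_MEAN) / IMAGENET_STANDARD_STD  # maps to roughly [-1, 1]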
324
0
"""simple docstring""" import math import os import sys def lowercase ( lowerCAmelCase__ : str ) -> str: __a = '''''' try: with open(lowerCAmelCase__ , '''rb''' ) as binary_file: __a = binary_file.read() for dat in data: __a = f'''{dat:08b}''' result += curr_byte return result except OSError: print('''File not accessible''' ) sys.exit() def lowercase ( lowerCAmelCase__ : dict[str, str] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : str ) -> None: lexicon.pop(lowerCAmelCase__ ) __a = last_match_id if math.loga(lowerCAmelCase__ ).is_integer(): for curr_key in lexicon: __a = '''0''' + lexicon[curr_key] __a = bin(lowerCAmelCase__ )[2:] def lowercase ( lowerCAmelCase__ : str ) -> str: __a = {'''0''': '''0''', '''1''': '''1'''} __a , __a = '''''', '''''' __a = len(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __a = lexicon[curr_string] result += last_match_id add_key_to_lexicon(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) index += 1 __a = '''''' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __a = lexicon[curr_string] result += last_match_id return result def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> str: __a = os.path.getsize(lowerCAmelCase__ ) __a = bin(lowerCAmelCase__ )[2:] __a = len(lowerCAmelCase__ ) return "0" * (length_length - 1) + file_length_binary + compressed def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> None: __a = 8 try: with open(lowerCAmelCase__ , '''wb''' ) as opened_file: __a = [ to_write[i : i + byte_length] for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('''10000000''' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(lowerCAmelCase__ , 2 ).to_bytes(1 , byteorder='''big''' ) ) except OSError: print('''File not accessible''' ) sys.exit() def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> None: __a = read_file_binary(lowerCAmelCase__ ) __a = compress_data(lowerCAmelCase__ ) __a = add_file_length(lowerCAmelCase__ , lowerCAmelCase__ ) write_file_binary(lowerCAmelCase__ , lowerCAmelCase__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
45
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : jnp.ndarray @flax_register_to_config class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : int = 3_2 _snake_case : int = 4 _snake_case : int = 4 _snake_case : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _snake_case : Union[bool, Tuple[bool]] = False _snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _snake_case : int = 2 _snake_case : Union[int, Tuple[int]] = 8 _snake_case : Optional[Union[int, Tuple[int]]] = None _snake_case : int = 1_2_8_0 _snake_case : float = 0.0 _snake_case : bool = False _snake_case : jnp.dtype = jnp.floataa _snake_case : bool = True _snake_case : int = 0 _snake_case : bool = False def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa ) _UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ ) _UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"] def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.block_out_channels _UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype ) _UpperCamelCase = self.only_cross_attention if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase = [] _UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = block_out_channels[i] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = down_blocks # mid _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCamelCase = [] _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = reversed_block_out_channels[i] _UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = output_channel _UpperCamelCase = up_blocks # out _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCamelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , jnp.ndarray ): _UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 ) _UpperCamelCase = self.time_proj(lowerCAmelCase__ ) _UpperCamelCase = self.time_embedding(lowerCAmelCase__ ) # 2. pre-process _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) _UpperCamelCase = self.conv_in(lowerCAmelCase__ ) # 3. down _UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( lowerCAmelCase__ , lowerCAmelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase = new_down_block_res_samples # 4. mid _UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = up_block( lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , ) else: _UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train ) # 6. post-process _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ ) _UpperCamelCase = nn.silu(lowerCAmelCase__ ) _UpperCamelCase = self.conv_out(lowerCAmelCase__ ) _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
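One convention worth noting in `__call__` above: the sample arrives in NCHW layout (the PyTorch convention), but Flax convolutions expect NHWC, so it is transposed on entry and transposed back before returning. The two transposes in isolation:

import jax.numpy as jnp

sample = jnp.zeros((1, 4, 32, 32))          # (batch, channels, height, width)
nhwc = jnp.transpose(sample, (0, 2, 3, 1))  # -> (1, 32, 32, 4) for the Flax convs
nchw = jnp.transpose(nhwc, (0, 3, 1, 2))    # -> (1, 4, 32, 32) on the way out
assert nchw.shape == sample.shape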
324
0
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase : def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Tuple: lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = seq_length lowerCAmelCase = is_training lowerCAmelCase = use_input_mask lowerCAmelCase = use_token_type_ids lowerCAmelCase = use_labels lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = type_vocab_size lowerCAmelCase = type_sequence_label_size lowerCAmelCase = initializer_range lowerCAmelCase = num_labels lowerCAmelCase = num_choices lowerCAmelCase = scope def _snake_case ( self ) -> int: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase = None if self.use_input_mask: lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase = None if self.use_token_type_ids: lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = None if self.use_labels: lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self ) -> Dict: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str: lowerCAmelCase = BioGptModel(config=lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase , attention_mask=lowercase ) lowerCAmelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any: lowerCAmelCase = BioGptForCausalLM(config=lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[Any]: lowerCAmelCase = BioGptModel(config=lowercase ) model.to(lowercase ) model.eval() # create attention mask lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) lowerCAmelCase = self.seq_length // 2 lowerCAmelCase = 0 # first forward pass lowerCAmelCase , lowerCAmelCase = model(lowercase , attention_mask=lowercase ).to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids lowerCAmelCase = ids_tensor((1,) , lowercase ).item() + 1 lowerCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) lowerCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , ) # get two different outputs lowerCAmelCase = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""] lowerCAmelCase = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["""last_hidden_state"""] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[Any]: lowerCAmelCase = BioGptModel(config=lowercase ).to(lowercase ).eval() lowerCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase ) # first forward pass lowerCAmelCase = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) lowerCAmelCase , lowerCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCAmelCase = model(lowercase , attention_mask=lowercase )["""last_hidden_state"""] lowerCAmelCase = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[ """last_hidden_state""" ] # select random slice lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase , lowercase , 
atol=1e-3 ) ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ) -> Optional[Any]: lowerCAmelCase = BioGptForCausalLM(lowercase ) model.to(lowercase ) if gradient_checkpointing: model.gradient_checkpointing_enable() lowerCAmelCase = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _snake_case ( self , lowercase , *lowercase ) -> List[str]: lowerCAmelCase = BioGptModel(lowercase ) lowerCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Optional[int]: lowerCAmelCase = self.num_labels lowerCAmelCase = BioGptForTokenClassification(lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self ) -> Dict: lowerCAmelCase = self.prepare_config_and_inputs() ( ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ( lowerCAmelCase ) , ) = config_and_inputs lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): _SCREAMING_SNAKE_CASE = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE = (BioGptForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE = ( { 'feature-extraction': BioGptModel, 'text-classification': BioGptForSequenceClassification, 'text-generation': BioGptForCausalLM, 'token-classification': BioGptForTokenClassification, 'zero-shot': BioGptForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE = False def _snake_case ( self ) -> List[Any]: lowerCAmelCase = BioGptModelTester(self ) lowerCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def _snake_case ( self ) -> Any: self.config_tester.run_common_tests() def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def _snake_case ( self ) -> Dict: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase = type self.model_tester.create_and_check_model(*lowercase ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase ) def _snake_case ( self ) -> Tuple: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase ) def _snake_case ( self ) -> Optional[int]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase ) def _snake_case ( self ) -> Union[str, Any]: lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase ) @slow def _snake_case ( self ) -> List[str]: lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowercase ) lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase = """left""" # Define PAD Token = EOS Token = 50256 lowerCAmelCase = tokenizer.eos_token lowerCAmelCase = model.config.eos_token_id # use different length sentences to test batching lowerCAmelCase = [ """Hello, my dog is a little""", """Today, I""", ] lowerCAmelCase = tokenizer(lowercase , return_tensors="""pt""" , padding=lowercase ) lowerCAmelCase = inputs["""input_ids"""].to(lowercase ) lowerCAmelCase = model.generate( input_ids=lowercase , attention_mask=inputs["""attention_mask"""].to(lowercase ) , ) lowerCAmelCase = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(lowercase ) lowerCAmelCase = model.generate(input_ids=lowercase ) lowerCAmelCase = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item() lowerCAmelCase = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(lowercase ) lowerCAmelCase = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) lowerCAmelCase = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) lowerCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) lowerCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) lowerCAmelCase = [ """Hello, my dog is a little bit bigger than a little bit.""", """Today, I have a good idea of how to use the information""", ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) @slow def _snake_case ( self ) -> Optional[int]: for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase = BioGptModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def _snake_case ( self ) -> Tuple: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = input_dict["""input_ids"""] lowerCAmelCase = input_ids.ne(1 ).to(lowercase ) lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase = 3 lowerCAmelCase = """multi_label_classification""" lowerCAmelCase = input_dict["""input_ids"""] lowerCAmelCase = input_ids.ne(1 ).to(lowercase ) lowerCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase = BioGptForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() lowerCAmelCase = 
model(lowercase , attention_mask=lowercase , labels=lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class lowercase ( unittest.TestCase ): @slow def _snake_case ( self ) -> Optional[Any]: lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase = torch.tensor([[2, 4_805, 9, 656, 21]] ) lowerCAmelCase = model(lowercase )[0] lowerCAmelCase = 42_384 lowerCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , lowercase ) lowerCAmelCase = torch.tensor( [[[-9.5_236, -9.8_918, 10.4_557], [-11.0_469, -9.6_423, 8.1_022], [-8.8_664, -7.8_826, 5.5_325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) ) @slow def _snake_case ( self ) -> List[Any]: lowerCAmelCase = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) lowerCAmelCase = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" ) model.to(lowercase ) torch.manual_seed(0 ) lowerCAmelCase = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(lowercase ) lowerCAmelCase = model.generate( **lowercase , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=lowercase , ) lowerCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase ) lowerCAmelCase = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(lowercase , lowercase )
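The batched-generation test above hinges on left padding: with right padding, a decoder-only model would be asked to continue from pad tokens. Its setup in isolation (real `transformers` API, same checkpoint as the test; GPT-style models ship without a pad token, so EOS is reused):

from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
tokenizer.padding_side = 'left'
tokenizer.pad_token = tokenizer.eos_token  # reuse EOS as PAD
inputs = tokenizer(['Hello, my dog is a little', 'Today, I'], return_tensors='pt', padding=True)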
46
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase__ : List[str] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase__ : Dict = logging.getLogger() def a__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int: """simple docstring""" _UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: return json.load(lowercase ) raise ValueError(F"""can't find {path}""" ) lowercase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def snake_case__ ( self : Any ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_glue.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def snake_case__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_clm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def snake_case__ ( self : Tuple ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_summarization_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) 
self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def snake_case__ ( self : str ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_ta_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_ner.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_qa.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
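# For orientation, a sketch of the file that the get_results() helper above
# parses: each Flax example script writes "<split>_results.json" into its
# --output_dir. The metric keys below are illustrative; the real set depends
# on the script (eval_accuracy, eval_perplexity, test_rouge1, ...).
import json
import os
import tempfile

output_dir = tempfile.mkdtemp()
with open(os.path.join(output_dir, "eval_results.json"), "w") as f:
    json.dump({"eval_accuracy": 0.80, "eval_loss": 0.35}, f)

# the same lookup the helper performs for split="eval":
with open(os.path.join(output_dir, "eval_results.json")) as f:
    result = json.load(f)
assert result["eval_accuracy"] >= 0.75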
324
0
'''Sum of the proper divisors of a positive integer.'''


def _lowerCAmelCase(input_num: int) -> int:
    """
    Return the sum of the proper divisors of `input_num`.

    >>> _lowerCAmelCase(6)
    6
    >>> _lowerCAmelCase(28)
    28
    >>> _lowerCAmelCase(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
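# Usage sketch: an integer equal to the sum of its proper divisors is a
# "perfect number", so the function returns its own argument for 6 and 28.
assert _lowerCAmelCase(6) == 1 + 2 + 3 == 6
assert _lowerCAmelCase(28) == 1 + 2 + 4 + 7 + 14 == 28
assert _lowerCAmelCase(12) == 1 + 2 + 3 + 4 + 6  # 12 is not perfect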
47
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ : Optional[Any] = logging.getLogger() def a__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Dict ) -> int: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = os.path.join(lowercase, '''all_results.json''' ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: _UpperCamelCase = json.load(lowercase ) else: raise ValueError(F"""can't find {path}""" ) return results def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod def snake_case__ ( cls : Optional[int] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Tuple ) -> int: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : int ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) ) @slow def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCAmelCase__ 
) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
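# The fixture above drives every example through `accelerate launch`. A
# standalone sketch of the same pattern; the script path and data files are
# illustrative and assume a transformers checkout is present.
import os
import subprocess
import tempfile

from accelerate.utils import write_basic_config

config_path = os.path.join(tempfile.mkdtemp(), "default_config.yml")
write_basic_config(save_location=config_path)  # single-process default config

cmd = [
    "accelerate", "launch", "--config_file", config_path,
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--model_name_or_path", "distilbert-base-uncased",
    "--train_file", "./tests/fixtures/tests_samples/MRPC/train.csv",
    "--validation_file", "./tests/fixtures/tests_samples/MRPC/dev.csv",
    "--output_dir", tempfile.mkdtemp(),
]
subprocess.run(cmd, check=True)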
324
0
import string


def decrypt(message: str) -> None:
    """Print the Caesar decryption of `message` under every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
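# A worked example of the shift arithmetic inside decrypt(): undoing a key-3
# encryption of "D". ascii_uppercase.find gives the 0-25 index; subtracting
# the key wraps around the alphabet (here via an explicit modulo).
import string

symbol, key = "D", 3
num = (string.ascii_uppercase.find(symbol) - key) % len(string.ascii_uppercase)
assert string.ascii_uppercase[num] == "A"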
48
'''Playfair cipher: prepare input, build the 5x5 key table, encode and decode.'''
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive fixed-size chunks from an iterable."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase the text, drop non-letters, split doubled letters with X, pad to even length."""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += 'X'
    clean += dirty[-1]
    if len(clean) & 1:
        clean += 'X'
    return clean


def generate_table(key: str) -> list[str]:
    # J is omitted: I and J share a cell, so 25 letters fit the 5x5 table
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
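# Round-trip sketch for the helpers above. The cipher drops J (I and J share a
# cell) and pads with X, so decoding returns the prepared form of the message
# rather than the raw input.
ciphertext = encode("Hide the gold", "SECRET")
assert decode(ciphertext, "SECRET") == prepare_input("Hide the gold")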
324
0
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case :int = logging.get_logger(__name__) __snake_case :Tuple = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class _A ( __UpperCAmelCase ): UpperCamelCase__ : Union[str, Any] = '''data2vec-text''' def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any]=30_522 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Dict=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : int=1E-12 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Dict="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ): '''simple docstring''' super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE) __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = position_embedding_type __a = use_cache __a = classifier_dropout class _A ( __UpperCAmelCase ): @property def _lowerCamelCase ( self : Tuple): '''simple docstring''' if self.task == "multiple-choice": __a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ])
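# A usage sketch against the public transformers API that this file mirrors
# (the class names above are mangled aliases of Data2VecTextConfig and
# Data2VecTextOnnxConfig); the deep import path is an assumption about the
# installed transformers layout.
from transformers import Data2VecTextConfig
from transformers.models.data2vec.configuration_data2vec_text import Data2VecTextOnnxConfig

config = Data2VecTextConfig()  # the defaults spelled out in __init__ above
onnx_config = Data2VecTextOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes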
49
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Any = {'vocab_file': 'spiece.model'} lowercase__ : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } lowercase__ : Optional[Any] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['input_ids', 'attention_mask'] _snake_case : List[int] = [] def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> Tuple: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ ) return token def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__ ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase__ ) _UpperCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str: '''simple docstring''' _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ ) _UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase = [] _UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) _UpperCamelCase = [] sub_texts.append(lowerCAmelCase__ ) else: current_sub_text.append(lowerCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) ) else: _UpperCamelCase = ''''''.join(lowerCAmelCase__ ) _UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ ) return clean_text else: return text def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase__ , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (out_vocab_file,) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] _UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
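# Usage sketch for the SentencePiece tokenizer above, via the public
# BigBirdTokenizer it mirrors (requires the sentencepiece package and network
# access for the checkpoint). A single sequence is wrapped as [CLS] X [SEP],
# matching build_inputs_with_special_tokens above.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
enc = tokenizer("Paris is the capital of France.")
assert enc["input_ids"][0] == tokenizer.cls_token_id
assert enc["input_ids"][-1] == tokenizer.sep_token_id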
324
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = 42 class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ): @register_to_config def __init__( self : List[str] , UpperCAmelCase : int = 65536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> List[Any]: super().__init__() lowerCamelCase__ : Optional[int] = sample_size # time if time_embedding_type == "fourier": lowerCamelCase__ : Optional[Any] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase ) lowerCamelCase__ : Any = 2 * block_out_channels[0] elif time_embedding_type == "positional": lowerCamelCase__ : List[Any] = Timesteps( block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase ) lowerCamelCase__ : Dict = block_out_channels[0] if use_timestep_embedding: lowerCamelCase__ : str = block_out_channels[0] * 4 lowerCamelCase__ : List[Any] = TimestepEmbedding( in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , ) lowerCamelCase__ : Any = nn.ModuleList([] ) lowerCamelCase__ : Tuple = None lowerCamelCase__ : List[str] = nn.ModuleList([] ) lowerCamelCase__ : Optional[int] = None # down lowerCamelCase__ : Optional[int] = in_channels for i, down_block_type in enumerate(UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = output_channel lowerCamelCase__ : Tuple = block_out_channels[i] if i == 0: input_channel += extra_in_channels lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1 lowerCamelCase__ : Optional[int] = get_down_block( UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(UpperCAmelCase ) # mid lowerCamelCase__ : Optional[int] = get_mid_block( UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , ) # up lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) ) lowerCamelCase__ : Optional[int] = reversed_block_out_channels[0] if out_block_type is None: lowerCamelCase__ : List[str] = out_channels else: lowerCamelCase__ : Any = block_out_channels[0] for i, up_block_type in enumerate(UpperCAmelCase ): lowerCamelCase__ : Tuple = output_channel lowerCamelCase__ 
: Union[str, Any] = ( reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels ) lowerCamelCase__ : List[str] = i == len(UpperCAmelCase ) - 1 lowerCamelCase__ : Dict = get_up_block( UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(UpperCAmelCase ) lowerCamelCase__ : int = output_channel # out lowerCamelCase__ : int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) lowerCamelCase__ : List[Any] = get_out_block( out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , ) def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]: lowerCamelCase__ : Optional[Any] = timestep if not torch.is_tensor(UpperCAmelCase ): lowerCamelCase__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0: lowerCamelCase__ : List[str] = timesteps[None].to(sample.device ) lowerCamelCase__ : Optional[int] = self.time_proj(UpperCAmelCase ) if self.config.use_timestep_embedding: lowerCamelCase__ : str = self.time_mlp(UpperCAmelCase ) else: lowerCamelCase__ : List[str] = timestep_embed[..., None] lowerCamelCase__ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) lowerCamelCase__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down lowerCamelCase__ : str = () for downsample_block in self.down_blocks: lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase ) down_block_res_samples += res_samples # 3. mid if self.mid_block: lowerCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): lowerCamelCase__ : Dict = down_block_res_samples[-1:] lowerCamelCase__ : Optional[Any] = down_block_res_samples[:-1] lowerCamelCase__ : Any = upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase ) # 5. post-process if self.out_block: lowerCamelCase__ : Any = self.out_block(UpperCAmelCase , UpperCAmelCase ) if not return_dict: return (sample,) return UNetaDOutput(sample=UpperCAmelCase )
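# The forward pass above, when use_timestep_embedding is False, broadcasts a
# per-batch time embedding across the sample length before the first down
# block; a minimal torch illustration of that reshape (sizes are arbitrary):
import torch

timestep_embed = torch.randn(1, 16)         # (batch, channels) from time_proj
sample = torch.randn(1, 2, 4096)            # (batch, in_channels, length)
timestep_embed = timestep_embed[..., None]  # -> (batch, 16, 1)
timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]])
assert timestep_embed.shape == (1, 16, 4096)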
50
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[int] = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : int = 'audio-spectrogram-transformer' def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=768 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : int=3072 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-1_2 , lowerCAmelCase__ : Any=16 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=10 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : Optional[int]=128 , **lowerCAmelCase__ : List[Any] , ) -> Tuple: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = patch_size _UpperCamelCase = qkv_bias _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins
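# The frequency/time strides in this config determine how many patches the AST
# embedding layer produces. A back-of-the-envelope count for the defaults
# above (128 mel bins, 1024 frames, 16x16 patches, stride 10); this mirrors
# the sliding-window formula rather than calling into the modeling code.
frequency_out = (128 - 16) // 10 + 1  # 12 patch positions along frequency
time_out = (1024 - 16) // 10 + 1      # 101 patch positions along time
assert frequency_out * time_out == 1212  # number of patches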
324
0
from ..utils import is_flax_available, is_torch_available

if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_ad import DualTransformeraDModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .ta_film_transformer import TaFilmDecoder
    from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_ad_condition_flax import FlaxUNetaDConditionModel
    from .vae_flax import FlaxAutoencoderKL
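# The availability guards above keep `import diffusers.models` working when a
# backend is missing; downstream code can follow the same pattern (the helper
# name below is illustrative, and assumes diffusers is installed):
from diffusers.utils import is_torch_available

if is_torch_available():
    import torch

    def to_backend_tensor(x):
        return torch.as_tensor(x)
else:
    def to_backend_tensor(x):
        raise ImportError("to_backend_tensor requires the PyTorch backend.")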
51
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase__ : Union[str, Any] = logging.get_logger(__name__) # General docstring lowercase__ : Dict = 'ResNetConfig' # Base docstring lowercase__ : str = 'microsoft/resnet-50' lowercase__ : Tuple = [1, 20_48, 7, 7] # Image classification docstring lowercase__ : Optional[Any] = 'microsoft/resnet-50' lowercase__ : List[str] = 'tiger cat' lowercase__ : List[Any] = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad( lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCamelCase = config.num_channels def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.pooler(lowerCAmelCase__ ) return embedding class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( 
nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = out_channels // reduction _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = input for layer in self.layers: _UpperCamelCase = layer(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCAmelCase__ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = ResNetConfig _snake_case : Union[str, Any] = 'resnet' _snake_case : Optional[int] = 'pixel_values' _snake_case : int = True def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' if isinstance(lowerCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = value lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder( lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config.num_labels _UpperCamelCase = ResNetModel(lowerCAmelCase__ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCAmelCase__ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) super()._init_backbone(lowerCAmelCase__ ) _UpperCamelCase = [config.embedding_size] + config.hidden_sizes _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput: '''simple 
docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
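# End-to-end usage sketch for the classification head defined above, via the
# public microsoft/resnet-50 checkpoint (requires network access; the image
# URL is the standard COCO example used in the transformers docs):
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])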
324
0
from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline __lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name class A__ ( __snake_case ): def __init__( self , A_ , A_ ): '''simple docstring''' super().__init__() self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ): '''simple docstring''' if audio_length_in_s is None: UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate UpperCamelCase : Any = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to""" F""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" ) UpperCamelCase : Union[str, Any] = int(A_ ) if sample_size % down_scale_factor != 0: UpperCamelCase : List[str] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled""" F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising""" " process." ) UpperCamelCase : Any = int(A_ ) UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch""" F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ ) # set step values self.scheduler.set_timesteps(A_ , device=audio.device ) UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output UpperCamelCase : Dict = self.unet(A_ , A_ ).sample # 2. compute previous image: x_t -> t_t-1 UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy() UpperCamelCase : Dict = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A_ )
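# Usage sketch for this audio pipeline through the public diffusers API it
# corresponds to (DanceDiffusionPipeline); harmonai/maestro-150k is the
# published checkpoint used in the diffusers docs and requires a download.
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
print(output.audios.shape)  # (batch, channels, samples)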
import collections
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_flax_cross_test,
    require_flax,
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available

from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester


if is_flax_available():
    from transformers import (
        FlaxBertModel,
        FlaxCLIPVisionModel,
        FlaxVisionTextDualEncoderModel,
        FlaxViTModel,
        VisionTextDualEncoderConfig,
        VisionTextDualEncoderProcessor,
    )
    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_available():
    import torch

    from transformers import VisionTextDualEncoderModel

if is_vision_available():
    from PIL import Image


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
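
# Hedged usage sketch (not part of the original file): assembling a Flax vision-text dual
# encoder from two separately pretrained towers, mirroring what the tests above exercise.
# The tiny checkpoint names are the same internal test checkpoints the tests use.
from transformers import FlaxVisionTextDualEncoderModel

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    "hf-internal-testing/tiny-random-vit",
    "hf-internal-testing/tiny-bert",
    vision_from_pt=True,
    text_from_pt=True,
)
print(model.config.projection_dim)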
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
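
# Hedged illustration (not part of the original file): the _LazyModule pattern above defers
# heavy submodule imports until an attribute is first accessed. A minimal standalone sketch
# of the idea, with simplified names; the real transformers _LazyModule does more.
import importlib
import types


class LazySubmoduleResolver(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported class/constant back to its defining submodule
        self._class_to_module = {
            exported: submodule for submodule, exports in import_structure.items() for exported in exports
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)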
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring""" a__ : Any = frozenset( [ '''prompt''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) a__ : Optional[int] = frozenset(['''prompt''', '''negative_prompt''']) a__ : Dict = frozenset([]) a__ : int = frozenset(['''image''']) a__ : Union[str, Any] = frozenset( [ '''image''', '''height''', '''width''', '''guidance_scale''', ] ) a__ : Optional[Any] = frozenset(['''image''']) a__ : List[Any] = frozenset( [ '''prompt''', '''image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) a__ : str = frozenset(['''prompt''', '''image''', '''negative_prompt''']) a__ : int = frozenset( [ # Text guided image variation with an image mask '''prompt''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', ] ) a__ : Any = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt''']) a__ : Optional[int] = frozenset( [ # image variation with an image mask '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) a__ : Optional[int] = frozenset(['''image''', '''mask_image''']) a__ : str = frozenset( [ '''example_image''', '''image''', '''mask_image''', '''height''', '''width''', '''guidance_scale''', ] ) a__ : str = frozenset(['''example_image''', '''image''', '''mask_image''']) a__ : List[str] = frozenset(['''class_labels''']) a__ : Any = frozenset(['''class_labels''']) a__ : List[str] = frozenset(['''batch_size''']) a__ : Optional[int] = frozenset([]) a__ : List[Any] = frozenset(['''batch_size''']) a__ : List[Any] = frozenset([]) a__ : int = frozenset( [ '''prompt''', '''audio_length_in_s''', '''guidance_scale''', '''negative_prompt''', '''prompt_embeds''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', ] ) a__ : Union[str, Any] = frozenset(['''prompt''', '''negative_prompt''']) a__ : str = frozenset(['''input_tokens''']) a__ : Optional[int] = frozenset(['''input_tokens'''])
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
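
# Hedged usage sketch (not part of the original file): the DiagonalGaussianDistribution above
# is what a VAE encoder output is wrapped in before a latent is sampled. Shapes are illustrative.
import torch

moments = torch.randn(2, 8, 32, 32)  # encoder output: mean and logvar stacked on dim 1
posterior = DiagonalGaussianDistribution(moments)
latents = posterior.sample()  # reparameterized sample, shape (2, 4, 32, 32)
kl = posterior.kl()  # per-sample KL divergence against a standard normal
print(latents.shape, kl.shape)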
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
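
# Hedged illustration (not part of the original file): what the key renaming above does to two
# representative ParlAI-style state-dict keys. The example keys themselves are assumptions.
print(rename_state_dict_key("embeddings.weight"))
# -> shared.weight
print(rename_state_dict_key("encoder.layers.0.attention.q_lin.weight"))
# -> encoder.layers.0.self_attn.q_proj.weight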
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A : str = logging.get_logger(__name__) class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : str =["""input_ids""", """attention_mask"""] def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens" ) __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token super().__init__( eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , ) __lowerCAmelCase = extra_ids __lowerCAmelCase = 2**8 # utf is 8 bits # define special tokens dict __lowerCAmelCase = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __lowerCAmelCase = len(self.special_tokens_encoder ) __lowerCAmelCase = len(__a ) for i, token in enumerate(__a ): __lowerCAmelCase = self.vocab_size + i - n __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()} @property def snake_case ( self ): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def snake_case ( self , __a , __a = None , __a = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__a )) + [1] return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1] def snake_case ( self , __a ): if len(__a ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def snake_case ( self , __a , __a = None ): __lowerCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def snake_case ( self , __a , __a = None ): __lowerCAmelCase = self._add_eos_if_not_present(__a ) if token_ids_a is None: return token_ids_a else: __lowerCAmelCase = self._add_eos_if_not_present(__a ) return token_ids_a + token_ids_a def snake_case ( self , __a ): __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )] return tokens def snake_case ( self , __a ): if token in self.special_tokens_encoder: __lowerCAmelCase = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __lowerCAmelCase = self.added_tokens_encoder[token] elif len(__a ) != 1: __lowerCAmelCase = self.unk_token_id else: __lowerCAmelCase = ord(__a ) + self._num_special_tokens return token_id def snake_case ( self , __a ): if index in self.special_tokens_decoder: __lowerCAmelCase = self.special_tokens_decoder[index] else: __lowerCAmelCase = chr(index - self._num_special_tokens ) return token def snake_case ( self , __a ): __lowerCAmelCase = B"" for token in tokens: if token in self.special_tokens_decoder: __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.added_tokens_decoder: __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.special_tokens_encoder: __lowerCAmelCase = token.encode("utf-8" ) elif token in self.added_tokens_encoder: __lowerCAmelCase = token.encode("utf-8" ) else: __lowerCAmelCase = bytes([ord(__a )] ) bstring += tok_string __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" ) return string def snake_case ( self , __a , __a = None ): return ()
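
# Hedged usage sketch (not part of the original file): round-tripping text through the
# byte-level tokenizer above. Instantiating the class directly with defaults is an assumption;
# in practice it is usually loaded via AutoTokenizer.from_pretrained("google/byt5-small").
tokenizer = ByT5Tokenizer()
ids = tokenizer("hi!").input_ids  # each utf-8 byte shifted by the 3 special tokens, plus </s>
print(ids)  # [107, 108, 36, 1]
print(tokenizer.decode(ids, skip_special_tokens=True))  # "hi!"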
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
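
# Hedged usage sketch (not part of the original file): building the 20-view orbit defined by
# create_pan_cameras above and pulling the ray bundle for a small render resolution.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays  # (batch, views * h * w, 2, 3): per-pixel origins and directions
print(cameras.shape, rays.shape)  # (1, 20) and torch.Size([1, 81920, 2, 3])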
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
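# --- Usage sketch (not part of the original module): a hypothetical call that
# routes the map through joblib's built-in "threading" backend. The `_apply`
# helper below is only an illustration of the (function, data, ...) tuple
# contract; in the library the worker is datasets' own map helper.
if __name__ == "__main__":
    def _apply(args):
        function, data, *_ = args
        if isinstance(data, list):  # multiprocessing path hands over a slice
            return [function(x) for x in data]
        return function(data)  # joblib path hands over a single object

    with parallel_backend("threading"):
        print(parallel_map(lambda x: x * x, list(range(8)), 4, None, True, None, _apply))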
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year={2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}

@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates the SARI score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7

    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when feeding images to the
        # image processor, given the "shortest_edge" resizing rule above.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
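# --- Illustration (not part of the original tests): the shortest-edge rule
# that `get_expected_values` above encodes, applied to one concrete shape.
# A 30 x 60 (h x w) image with shortest_edge=18 keeps the aspect ratio: the
# short side becomes 18 and the long side scales proportionally.
if __name__ == "__main__":
    h, w, shortest_edge = 30, 60, 18
    if w < h:
        out = (int(shortest_edge * h / w), shortest_edge)
    else:
        out = (shortest_edge, int(shortest_edge * w / h))
    print(out)  # (18, 36)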
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class snake_case_: __UpperCamelCase = 42 __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default='''Translation''' , init=a__ , repr=a__ ) def __call__( self : Union[str, Any] ): return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase__ ( self : List[Any] ): from .features import Value return {k: Value('''string''' ) for k in sorted(self.languages )} @dataclass class snake_case_: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None # Automatically constructed __UpperCamelCase = "dict" __UpperCamelCase = None __UpperCamelCase = field(default='''TranslationVariableLanguages''' , init=a__ , repr=a__ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : List[Any] = sorted(set(self.languages ) ) if self.languages else None lowerCAmelCase : int = len(self.languages ) if self.languages else None def __call__( self : List[Any] ): return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[Any] = set(self.languages ) if self.languages and set(UpperCamelCase_ ) - lang_set: raise ValueError( F'''Some languages in example ({", ".join(sorted(set(UpperCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(UpperCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. lowerCAmelCase : List[str] = [] for lang, text in translation_dict.items(): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. lowerCAmelCase, lowerCAmelCase : Optional[Any] = zip(*sorted(UpperCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase__ ( self : Dict ): from .features import Sequence, Value return { "language": Sequence(Value('''string''' ) ), "translation": Sequence(Value('''string''' ) ), }
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
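# --- Usage sketch (not part of the original module): the [CLS]/[SEP] framing
# produced by the pair-building helper above, shown with made-up token ids and
# a stub standing in for a real tokenizer instance. Purely illustrative.
if __name__ == "__main__":
    from types import SimpleNamespace

    stub = SimpleNamespace(sep_token_id=102, cls_token_id=101)
    ids = RemBertTokenizerFast.build_inputs_with_special_tokens(stub, [7, 8], [9])
    print(ids)  # [101, 7, 8, 102, 9, 102]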
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): for param, grad_param in zip(model_a.parameters(), model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ): model.train() UpperCAmelCase_ : int = model(__lowerCamelCase ) UpperCAmelCase_ : List[str] = F.mse_loss(__lowerCamelCase, target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__lowerCamelCase ) def __a ( __lowerCamelCase, __lowerCamelCase=False ): set_seed(42 ) UpperCAmelCase_ : Dict = RegressionModel() UpperCAmelCase_ : Optional[Any] = deepcopy(__lowerCamelCase ) UpperCAmelCase_ : Tuple = RegressionDataset(length=80 ) UpperCAmelCase_ : List[Any] = DataLoader(__lowerCamelCase, batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ : Any = AdamW(params=model.parameters(), lr=1E-3 ) UpperCAmelCase_ : str = AdamW(params=ddp_model.parameters(), lr=1E-3 ) UpperCAmelCase_ : str = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 ) UpperCAmelCase_ : List[str] = LambdaLR(__lowerCamelCase, lr_lambda=lambda __lowerCamelCase : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = accelerator.prepare(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(__lowerCamelCase, __lowerCamelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def __a ( __lowerCamelCase ): # Test when on a single CPU or GPU that the context manager does nothing UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_training_setup(__lowerCamelCase ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = next(iter(__lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCamelCase ): step_model(__lowerCamelCase, 
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: # Sync grads step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad, ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : Optional[Any] = ddp_input[torch.randperm(len(__lowerCamelCase ) )] def __a ( __lowerCamelCase ): # Test on distributed setup that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = get_training_setup(__lowerCamelCase ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ : int = next(iter(__lowerCamelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCamelCase ): step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) else: # Sync grads step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : Dict = ddp_input[torch.randperm(len(__lowerCamelCase ) )] def __a ( __lowerCamelCase=False, __lowerCamelCase=False ): UpperCAmelCase_ : Tuple = Accelerator( split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_training_setup(__lowerCamelCase ) for iteration, batch in enumerate(__lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : int = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : Dict = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__lowerCamelCase ): 
step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters(), ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCamelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad, ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) UpperCAmelCase_ : int = ddp_input[torch.randperm(len(__lowerCamelCase ) )] GradientState._reset_state() def __a ( __lowerCamelCase=False, __lowerCamelCase=False ): UpperCAmelCase_ : List[Any] = Accelerator( split_batches=__lowerCamelCase, dispatch_batches=__lowerCamelCase, gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_training_setup(__lowerCamelCase, __lowerCamelCase ) for iteration, batch in enumerate(__lowerCamelCase ): UpperCAmelCase_ , UpperCAmelCase_ : str = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ : str = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCamelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__lowerCamelCase ): step_model(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCamelCase )) if accelerator.num_processes > 1: check_model_parameters(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def __a ( ): UpperCAmelCase_ : Dict = Accelerator() UpperCAmelCase_ : Tuple = RegressionDataset(length=80 ) UpperCAmelCase_ : str = DataLoader(__lowerCamelCase, batch_size=16 ) UpperCAmelCase_ : Optional[Any] = RegressionDataset(length=96 ) UpperCAmelCase_ : List[Any] = DataLoader(__lowerCamelCase, batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(__lowerCamelCase, __lowerCamelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__lowerCamelCase ): assert 
id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase ) if iteration < len(__lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__lowerCamelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCamelCase ) if batch_num < len(__lowerCamelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def __a ( ): UpperCAmelCase_ : str = Accelerator() UpperCAmelCase_ : int = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(__lowerCamelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(__lowerCamelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation(__lowerCamelCase, __lowerCamelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<", "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""", ) test_gradient_accumulation_with_opt_and_scheduler(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
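# --- Usage sketch (not part of the original test file): the training pattern
# the tests above exercise, reduced to its core. Inside `accelerator.accumulate`
# gradients only sync, and the optimizer only truly steps, every
# `gradient_accumulation_steps` batches. Typically launched via `accelerate launch`.
def _accumulate_sketch():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    dataloader = DataLoader(RegressionDataset(length=32), batch_size=8)
    opt = AdamW(model.parameters(), lr=1e-3)
    model, opt, dataloader = accelerator.prepare(model, opt, dataloader)
    for batch in dataloader:
        with accelerator.accumulate(model):
            loss = F.mse_loss(model(batch["x"]), batch["y"])
            accelerator.backward(loss)
            opt.step()
            opt.zero_grad()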
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
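# --- Usage sketch (not part of the original module): two common ways to build
# the config; the values printed are the defaults defined above.
if __name__ == "__main__":
    config = DeformableDetrConfig()  # timm ResNet-50 backbone, 300 queries
    print(config.num_queries, config.hidden_size, config.num_attention_heads)

    # The two-stage variant requires box refinement, per the check in __init__.
    two_stage_config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
    print(two_stage_config.two_stage_num_proposals)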
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")

                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is"
        " that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen"
        " the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
self.assertEqual(ids[0] , A_ ) self.assertEqual(len(A_ ) , A_ ) def _a ( self ) -> List[str]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [256203, 3] ) def _a ( self ) -> Optional[Any]: __UpperCamelCase =tempfile.mkdtemp() __UpperCamelCase =self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A_ ) __UpperCamelCase =NllbTokenizer.from_pretrained(A_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A_ ) @require_torch def _a ( self ) -> Dict: __UpperCamelCase =self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , ) __UpperCamelCase =shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(A_ , A_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) __UpperCamelCase =batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A_ ) self.assertEqual(A_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def _a ( self ) -> Any: __UpperCamelCase =self.tokenizer(self.src_text , padding=A_ , truncation=A_ , max_length=3 , return_tensors='pt' ) __UpperCamelCase =self.tokenizer( text_target=self.tgt_text , padding=A_ , truncation=A_ , max_length=10 , return_tensors='pt' ) __UpperCamelCase =targets['input_ids'] __UpperCamelCase =shift_tokens_right( A_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _a ( self ) -> Optional[Any]: __UpperCamelCase =self.tokenizer._build_translation_inputs( 'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(A_ ) , { # A, test, EOS, en_XX 'input_ids': [[256047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 256057, } , ) @require_torch def _a ( self ) -> Optional[int]: __UpperCamelCase =True __UpperCamelCase =self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) __UpperCamelCase =False __UpperCamelCase =self.tokenizer( 'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids , [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
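# A minimal sketch of the behaviour exercised by the suite above, assuming the
# upstream `NllbTokenizer` this obfuscated test derives from (fetching the
# checkpoint needs network access). With the default, non-legacy behaviour the
# source language code is prepended and </s> is appended, matching the
# expected token lists in the integration test.
from transformers import NllbTokenizer

_nllb = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
)
_ids = _nllb("UN Chief says there is no military solution in Syria").input_ids
assert _ids[0] == _nllb.convert_tokens_to_ids("eng_Latn")
assert _ids[-1] == _nllb.eos_token_id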
62
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Best-effort Caesar decryption: try every shift and keep the one whose
    letter distribution best matches English (lowest chi-squared statistic)."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if letter.lower() in frequencies:
                # Get the amount of times the letter occurs in the message
                occurrences = decrypted_with_shift.lower().count(letter.lower())

                # Get the expected amount of times the letter should appear
                # based on letter frequencies
                expected = frequencies[letter.lower()] * occurrences

                # Complete the chi squared statistic formula
                chi_letter_value = ((occurrences - expected) ** 2) / expected

                # Add the margin of error to the total chi squared statistic
                chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi
    # squared statistic
    def sorting_key(shift_key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[shift_key]

    most_likely_cipher = min(chi_squared_statistic_values, key=sorting_key)

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
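# A quick self-check for the breaker above: encrypt a sentence with a known
# shift, then recover it (the sentence and shift are illustrative only; a
# reasonably long English input is needed for the statistic to be reliable).
def _caesar_encrypt(text: str, shift: int) -> str:
    # Shift lowercase letters forward by `shift`; leave everything else alone.
    return "".join(
        chr((ord(c) - 97 + shift) % 26 + 97) if c.islower() else c for c in text
    )


_cipher = _caesar_encrypt("chi squared picks the most english looking shift", 7)
_shift, _chi_sq, _plain = decrypt_caesar_with_chi_squared(_cipher)
print(_shift, round(_chi_sq, 2), _plain)  # expected shift: 7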
324
0
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowerCAmelCase_ : List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =PegasusConfig __a ={} __a ='gelu' def __init__( self : Optional[Any] , __a : str , __a : List[str]=13 , __a : Union[str, Any]=7 , __a : List[str]=True , __a : Optional[int]=False , __a : Tuple=99 , __a : Dict=32 , __a : str=5 , __a : Any=4 , __a : Optional[int]=37 , __a : Optional[int]=0.1 , __a : Tuple=0.1 , __a : Optional[Any]=20 , __a : List[str]=2 , __a : Optional[Any]=1 , __a : Optional[int]=0 , ): _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = eos_token_id _a = pad_token_id _a = bos_token_id def UpperCamelCase__ ( self : Any ): _a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) _a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) _a = np.concatenate([input_ids, eos_tensor] , axis=1 ) _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _a = prepare_pegasus_inputs_dict(__a , __a , __a ) return config, inputs_dict def UpperCamelCase__ ( self : str , __a : Tuple , __a : List[Any] , __a : Optional[int] ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , 
decoder_position_ids=__a , ) _a = model.decode(__a , __a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def UpperCamelCase__ ( self : Tuple , __a : Tuple , __a : int , __a : Dict ): _a = 20 _a = model_class_name(__a ) _a = model.encode(inputs_dict["input_ids"] ) _a , _a = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) _a = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _a = model.init_cache(decoder_input_ids.shape[0] , __a , __a ) _a = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _a = model.decode( decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , ) _a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) _a = model.decode( decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , ) _a = model.decode(__a , __a , decoder_attention_mask=__a ) _a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def _lowerCamelCase ( lowercase : Tuple , lowercase : Optional[int] , lowercase : int , lowercase : str=None , lowercase : Any=None , ) -> Union[str, Any]: if attention_mask is None: _a = np.not_equal(lowercase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: _a = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __a =( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a =True __a =False __a =False __a =False def UpperCamelCase__ ( self : Tuple ): _a = FlaxPegasusModelTester(self ) _a = ConfigTester(self , config_class=__a ) def UpperCamelCase__ ( self : Dict ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self : List[Any] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(__a , __a , __a ) def UpperCamelCase__ ( self : str ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a ) def UpperCamelCase__ ( self : Any ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__a , __a ) _a = model_class(__a ) @jax.jit def encode_jitted(__a : Dict , __a : Dict=None , **__a : str ): return model.encode(input_ids=__a , attention_mask=__a ) with self.subTest("JIT Enabled" ): _a = encode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = encode_jitted(**__a ).to_tuple() 
self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) def UpperCamelCase__ ( self : Optional[int] ): _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = model_class(__a ) _a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) _a = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(__a : Dict , __a : List[str] , __a : str ): return model.decode( decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , ) with self.subTest("JIT Enabled" ): _a = decode_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): _a = decode_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def UpperCamelCase__ ( self : List[Any] ): for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__a ) _a = np.ones((1, 1) ) _a = model(__a ) self.assertIsNotNone(__a ) @slow def UpperCamelCase__ ( self : str ): _a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) _a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) _a = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. 
We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] _a = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] _a = tokenizer(__a , return_tensors="np" , truncation=__a , max_length=5_12 , padding=__a ) _a = model.generate(**__a , num_beams=2 ).sequences _a = tokenizer.batch_decode(__a , skip_special_tokens=__a ) assert tgt_text == decoded
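# A short sketch of the generation path the slow test above exercises, assuming
# the upstream Flax Pegasus classes (weights are downloaded on first use; the
# input text is abbreviated):
from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer

_pegasus = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
_ptok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
_inputs = _ptok(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    return_tensors="np", truncation=True, padding=True,
)
_summary_ids = _pegasus.generate(**_inputs, num_beams=2).sequences
print(_ptok.batch_decode(_summary_ids, skip_special_tokens=True))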
63
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """
    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
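# A quick non-interactive check of the introsort implementation above
# (illustrative values only):
_example = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
_expected = sorted(_example)
assert sort(_example) == _expected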
324
0
"""simple docstring""" from packaging import version from .import_utils import is_accelerate_available if is_accelerate_available(): import accelerate def UpperCAmelCase__ (snake_case__ : List[Any] ): """simple docstring""" if not is_accelerate_available(): return method _snake_case : Union[str, Any] = version.parse(accelerate.__version__ ).base_version if version.parse(snake_case__ ) < version.parse("""0.17.0""" ): return method def wrapper(self : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : str ): if hasattr(self , """_hf_hook""" ) and hasattr(self._hf_hook , """pre_forward""" ): self._hf_hook.pre_forward(self ) return method(self , *snake_case__ , **snake_case__ ) return wrapper
64
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their (unique) names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    # Rewrite every input reference `name` -> `new_name`, recursing into the
    # subgraphs of If/Loop nodes.
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # Drop the duplicate and point all consumers at the kept initializer.
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """Remove duplicated initializers from an ONNX model and save the
    optimized model as `optimized_<name>` next to the original file."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / float64
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
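# A minimal usage sketch for the deduplication pass above; the path is
# illustrative only and must point at an existing ONNX file.
_optimized_path = remove_dup_initializers("model.onnx")
print("optimized model written to", _optimized_path)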
324
0
import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput UpperCamelCase__ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class A ( UpperCAmelCase_ ): def __init__(self : Any , *__UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : Optional[int]=None , **__UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) UpperCAmelCase__ = eval_examples UpperCAmelCase__ = post_process_function UpperCAmelCase__ = quant_trainer_args UpperCAmelCase__ = 1_2_8 # default number of calibration samples def lowercase_ (self : int , __UpperCAmelCase : str=None ) -> Union[str, Any]: """simple docstring""" if calib_dataset is None and self.calib_dataset is None: raise ValueError("Trainer: calibration requires an calib_dataset." ) UpperCAmelCase__ = calib_dataset if calib_dataset is not None else self.calib_dataset UpperCAmelCase__ = self._remove_unused_columns(__UpperCAmelCase , description="Calibration" ) return DataLoader( __UpperCAmelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__UpperCAmelCase , ) def lowercase_ (self : int , __UpperCAmelCase : int=None ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.train_dataset if calib_dataset is None else calib_dataset UpperCAmelCase__ = self.get_calib_dataloader(__UpperCAmelCase ) UpperCAmelCase__ = self.model quant_trainer.configure_model(__UpperCAmelCase , self.quant_trainer_args , calib=__UpperCAmelCase ) model.eval() quant_trainer.enable_calibration(__UpperCAmelCase ) logger.info("***** Running calibration *****" ) logger.info(f""" Num examples = {self.calib_num}""" ) logger.info(f""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(__UpperCAmelCase ): # Prediction step UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.prediction_step(__UpperCAmelCase , __UpperCAmelCase , prediction_loss_only=__UpperCAmelCase ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(__UpperCAmelCase , self.quant_trainer_args ) UpperCAmelCase__ = model def lowercase_ (self : List[Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str=None , __UpperCAmelCase : str = "eval" ) -> int: """simple docstring""" UpperCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset UpperCAmelCase__ = self.get_eval_dataloader(__UpperCAmelCase ) UpperCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
UpperCAmelCase__ = self.compute_metrics UpperCAmelCase__ = None UpperCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ = eval_loop( __UpperCAmelCase , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , ) finally: UpperCAmelCase__ = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: UpperCAmelCase__ = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions ) UpperCAmelCase__ = self.compute_metrics(__UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCAmelCase__ = metrics.pop(__UpperCAmelCase ) self.log(__UpperCAmelCase ) else: UpperCAmelCase__ = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) UpperCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCAmelCase ) return metrics def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : str = "test" ) -> Dict: """simple docstring""" UpperCAmelCase__ = self.get_test_dataloader(__UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. UpperCAmelCase__ = self.compute_metrics UpperCAmelCase__ = None UpperCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: UpperCAmelCase__ = eval_loop( __UpperCAmelCase , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , ) finally: UpperCAmelCase__ = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output UpperCAmelCase__ = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions , "predict" ) UpperCAmelCase__ = self.compute_metrics(__UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): UpperCAmelCase__ = metrics.pop(__UpperCAmelCase ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCAmelCase ) def lowercase_ (self : Optional[int] , __UpperCAmelCase : int="./" ) -> Any: """simple docstring""" UpperCAmelCase__ = self.eval_dataset UpperCAmelCase__ = self.get_eval_dataloader(__UpperCAmelCase ) UpperCAmelCase__ = next(iter(__UpperCAmelCase ) ) # saving device - to make it consistent UpperCAmelCase__ = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) # convert to tuple UpperCAmelCase__ = tuple(v.to(__UpperCAmelCase ) for k, v in batch.items() ) logger.info("Converting model to be onnx compatible" ) from pytorch_quantization.nn import TensorQuantizer UpperCAmelCase__ = True UpperCAmelCase__ = self.model.to(__UpperCAmelCase ) model.eval() model.float() UpperCAmelCase__ = model.module if hasattr(__UpperCAmelCase , "module" ) else model quant_trainer.configure_model(__UpperCAmelCase , self.quant_trainer_args ) UpperCAmelCase__ = os.path.join(__UpperCAmelCase , "model.onnx" ) logger.info(f"""exporting model to {output_model_file}""" ) UpperCAmelCase__ = {0: "batch_size", 1: "seq_len"} torch.onnx.export( __UpperCAmelCase , __UpperCAmelCase , 
__UpperCAmelCase , export_params=__UpperCAmelCase , opset_version=1_3 , do_constant_folding=__UpperCAmelCase , input_names=["input_ids", "attention_mask", "token_type_ids"] , output_names=["output_start_logits", "output_end_logits"] , dynamic_axes={ "input_ids": axes, "attention_mask": axes, "token_type_ids": axes, "output_start_logits": axes, "output_end_logits": axes, } , verbose=__UpperCAmelCase , ) logger.info("onnx export finished" )
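# A high-level workflow sketch for the trainer subclass above, kept as comments
# because the obfuscation renamed the public methods; the names below
# (`QuestionAnsweringTrainer`, `calibrate`, `save_onnx`, `quant_trainer_args`)
# are assumptions based on the upstream quantization QA example:
#
#   trainer = QuestionAnsweringTrainer(
#       model=model, args=training_args, train_dataset=train_dataset,
#       eval_dataset=eval_dataset, eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       quant_trainer_args=quant_trainer_args,
#   )
#   trainer.calibrate(calib_dataset)             # post-training calibration pass
#   metrics = trainer.evaluate()                 # evaluation with QA post-processing
#   trainer.save_onnx(training_args.output_dir)  # export the quantized model to ONNX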
65
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase__ : List[Any] = 25_00_04 lowercase__ : str = 25_00_20 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MBartTokenizer _snake_case : Tuple = MBartTokenizerFast _snake_case : List[str] = True _snake_case : Optional[Any] = True def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = 
tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : Dict = 'facebook/mbart-large-en-ro' _snake_case : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _snake_case : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod 
def snake_case__ ( cls : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _UpperCamelCase = 1 return cls def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def snake_case__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , lowerCAmelCase__ ) _UpperCamelCase = 10 _UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ ) @require_torch def snake_case__ ( self : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , 
lowerCAmelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' ) _UpperCamelCase = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' ) _UpperCamelCase = targets['''input_ids'''] _UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def snake_case__ ( self : Tuple ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 250004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, } , )
324
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self: Union[str, Any] , snake_case: Optional[Any] , snake_case: List[str]=7 , snake_case: List[str]=3 , snake_case: Tuple=18 , snake_case: Optional[int]=30 , snake_case: Optional[Any]=400 , snake_case: Tuple=True , snake_case: int=None , snake_case: Optional[int]=True , snake_case: int=None , ) -> Dict: snake_case_ :Optional[Any] = size if size is not None else {"""shortest_edge""": 20} snake_case_ :int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} snake_case_ :List[Any] = parent snake_case_ :Any = batch_size snake_case_ :List[Any] = num_channels snake_case_ :Any = image_size snake_case_ :Tuple = min_resolution snake_case_ :Optional[Any] = max_resolution snake_case_ :Tuple = do_resize snake_case_ :Any = size snake_case_ :List[str] = do_center_crop snake_case_ :Dict = crop_size def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase ( _lowerCAmelCase , unittest.TestCase ): '''simple docstring''' _A : Any = MobileNetVaImageProcessor if is_vision_available() else None def lowerCAmelCase_ ( self: str ) -> Dict: snake_case_ :Any = MobileNetVaImageProcessingTester(self ) @property def lowerCAmelCase_ ( self: Dict ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase_ ( self: List[str] ) -> List[Any]: snake_case_ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case , """do_resize""" ) ) self.assertTrue(hasattr(snake_case , """size""" ) ) self.assertTrue(hasattr(snake_case , """do_center_crop""" ) ) self.assertTrue(hasattr(snake_case , """crop_size""" ) ) def lowerCAmelCase_ ( self: Dict ) -> Tuple: snake_case_ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 20} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) snake_case_ :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCAmelCase_ ( self: Tuple ) -> Tuple: pass def lowerCAmelCase_ ( self: Tuple ) -> List[Any]: # Initialize image_processing snake_case_ :str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ :int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , Image.Image ) # Test not batched input snake_case_ :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], 
self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :List[Any] = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self: Any ) -> Tuple: # Initialize image_processing snake_case_ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , np.ndarray ) # Test not batched input snake_case_ :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :int = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCAmelCase_ ( self: str ) -> List[str]: # Initialize image_processing snake_case_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case ) for image in image_inputs: self.assertIsInstance(snake_case , torch.Tensor ) # Test not batched input snake_case_ :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched snake_case_ :int = image_processing(snake_case , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
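# A small sketch of what the shape assertions above verify, assuming the
# upstream `MobileNetV1ImageProcessor` this obfuscated test derives from:
import numpy as np
from transformers import MobileNetV1ImageProcessor

_proc = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
_img = (np.random.rand(3, 30, 40) * 255).astype(np.uint8)
_pixel_values = _proc(_img, return_tensors="np").pixel_values
print(_pixel_values.shape)  # (1, 3, 18, 18): shortest-edge resize, then center crop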
66
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase__ : str = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Union[str, Any] = ['pixel_values'] def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 256} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ ) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray: '''simple docstring''' return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]: '''simple docstring''' _UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase__ ): _UpperCamelCase = target_sizes.numpy() _UpperCamelCase = [] for idx in range(len(lowerCAmelCase__ ) ): _UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ ) _UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase__ ) else: _UpperCamelCase = logits.argmax(dim=1 ) _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
324
0
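A side note on the image processor above: the pipeline it implements is shortest-edge resize, center crop, rescale, normalize, channels-first. A minimal numpy/PIL sketch of that order follows; the 256/224 sizes and the per-channel 0.5 mean/std mirror the class defaults, while the function name and exact rounding are illustrative assumptions, not the library code.

import numpy as np
from PIL import Image


def preprocess(image: Image.Image) -> np.ndarray:
    # 1. Resize so the shortest edge is 256, preserving aspect ratio.
    w, h = image.size
    scale = 256 / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BILINEAR)
    # 2. Center-crop to 224 x 224.
    w, h = image.size
    left, top = (w - 224) // 2, (h - 224) // 2
    image = image.crop((left, top, left + 224, top + 224))
    # 3. Rescale to [0, 1], then normalize (mean = std = 0.5 per channel).
    x = np.asarray(image, dtype=np.float32) / 255.0
    x = (x - 0.5) / 0.5
    # 4. HWC -> CHW, i.e. ChannelDimension.FIRST.
    return x.transpose(2, 0, 1)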
'''simple docstring'''


def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz, counting from `number` up to and including `iterations`."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
67
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : jnp.ndarray @flax_register_to_config class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : int = 3_2 _snake_case : int = 4 _snake_case : int = 4 _snake_case : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _snake_case : Union[bool, Tuple[bool]] = False _snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _snake_case : int = 2 _snake_case : Union[int, Tuple[int]] = 8 _snake_case : Optional[Union[int, Tuple[int]]] = None _snake_case : int = 1_2_8_0 _snake_case : float = 0.0 _snake_case : bool = False _snake_case : jnp.dtype = jnp.floataa _snake_case : bool = True _snake_case : int = 0 _snake_case : bool = False def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa ) _UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ ) _UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"] def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.block_out_channels _UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype ) _UpperCamelCase = self.only_cross_attention if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase = [] _UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = block_out_channels[i] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = down_blocks # mid _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCamelCase = [] _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = reversed_block_out_channels[i] _UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = output_channel _UpperCamelCase = up_blocks # out _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCamelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , jnp.ndarray ): _UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 ) _UpperCamelCase = self.time_proj(lowerCAmelCase__ ) _UpperCamelCase = self.time_embedding(lowerCAmelCase__ ) # 2. pre-process _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) _UpperCamelCase = self.conv_in(lowerCAmelCase__ ) # 3. down _UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( lowerCAmelCase__ , lowerCAmelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase = new_down_block_res_samples # 4. mid _UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = up_block( lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , ) else: _UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train ) # 6. post-process _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ ) _UpperCamelCase = nn.silu(lowerCAmelCase__ ) _UpperCamelCase = self.conv_out(lowerCAmelCase__ ) _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
324
0
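To make the skip-connection bookkeeping in the Flax UNet above concrete: the down path pushes one residual per resnet layer plus one per downsampler (the final down block has none), and every up block pops `layers_per_block + 1` residuals back off the end of the list. A framework-free toy with placeholder block names, not the real architecture:

layers_per_block = 2
down_block_res_samples = ["conv_in"]
down_blocks = ("down0", "down1", "down2")
for i, block in enumerate(down_blocks):
    down_block_res_samples += [f"{block}.res{j}" for j in range(layers_per_block)]
    if i < len(down_blocks) - 1:  # the last down block has no downsampler
        down_block_res_samples.append(f"{block}.downsample")
for block in ("up0", "up1", "up2"):
    res_hidden_states = down_block_res_samples[-(layers_per_block + 1):]
    down_block_res_samples = down_block_res_samples[: -(layers_per_block + 1)]
    print(block, "consumes", res_hidden_states)

After the loop `down_block_res_samples` is empty: every residual stored on the way down is consumed exactly once on the way up, which is what the `[-(self.layers_per_block + 1):]` slicing in the model guarantees.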
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = TransfoXLTokenizer __lowerCamelCase = False __lowerCamelCase = False def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' super().setUp() A__ = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCamelCase ( self , **lowercase ) -> Union[str, Any]: '''simple docstring''' A__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def UpperCamelCase ( self , lowercase ) -> List[Any]: '''simple docstring''' A__ = "<unk> UNwanted , running" A__ = "<unk> unwanted, running" return input_text, output_text def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=lowercase ) A__ = tokenizer.tokenize("<unk> UNwanted , running" ) self.assertListEqual(lowercase , ["<unk>", "unwanted", ",", "running"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [0, 4, 8, 7] ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] ) def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase ) A__ = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" A__ = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(lowercase ) , lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = self.get_tokenizer() A__ = len(lowercase ) tokenizer.add_tokens(["new1", "new2"] ) tokenizer.move_added_token("new1" , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(lowercase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1" ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , "new1" )
68
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase__ : List[str] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase__ : Dict = logging.getLogger() def a__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int: """simple docstring""" _UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: return json.load(lowercase ) raise ValueError(F"""can't find {path}""" ) lowercase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def snake_case__ ( self : Any ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_glue.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def snake_case__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_clm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def snake_case__ ( self : Tuple ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_summarization_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) 
self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def snake_case__ ( self : str ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_ta_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_ner.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_qa.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
324
0
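The `@,@`/`@.@`/`@-@` tokens asserted in the TransfoXL test above come from the WikiText-style corpus convention: digit separators and hyphens are split out as standalone marker tokens. A rough regex sketch of that normalization, for illustration only (the tokenizer's real implementation differs):

import re


def wikitext_style_split(text: str) -> str:
    text = re.sub(r"(\d),(\d)", r"\1 @,@ \2", text)  # 5,000 -> 5 @,@ 000
    text = re.sub(r"(\d)\.(\d)", r"\1 @.@ \2", text)  # 3.34 -> 3 @.@ 34
    return text.replace("-", " @-@ ")  # side-scrolled -> side @-@ scrolled


print(wikitext_style_split("$5,000 with 3.34 m side-scrolled"))
# $5 @,@ 000 with 3 @.@ 34 m side @-@ scrolled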
"""simple docstring""" import json import os import torch from diffusers import UNetaDModel os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True) os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True) def UpperCAmelCase ( UpperCAmelCase ) -> Dict: if hor == 128: snake_case_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') snake_case_ = (32, 128, 256) snake_case_ = ('UpResnetBlock1D', 'UpResnetBlock1D') elif hor == 32: snake_case_ = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D') snake_case_ = (32, 64, 128, 256) snake_case_ = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D') snake_case_ = torch.load(f'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' ) snake_case_ = model.state_dict() snake_case_ = { 'down_block_types': down_block_types, 'block_out_channels': block_out_channels, 'up_block_types': up_block_types, 'layers_per_block': 1, 'use_timestep_embedding': True, 'out_block_type': 'OutConv1DBlock', 'norm_num_groups': 8, 'downsample_each_block': False, 'in_channels': 14, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'flip_sin_to_cos': False, 'freq_shift': 1, 'sample_size': 65536, 'mid_block_type': 'MidResTemporalBlock1D', 'act_fn': 'mish', } snake_case_ = UNetaDModel(**UpperCAmelCase ) print(f'length of state dict: {len(state_dict.keys() )}' ) print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) snake_case_ = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(UpperCAmelCase ) hf_value_function.load_state_dict(UpperCAmelCase ) torch.save(hf_value_function.state_dict() , f'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' ) with open(f'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( ) -> int: snake_case_ = { 'in_channels': 14, 'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'), 'up_block_types': (), 'out_block_type': 'ValueFunction', 'mid_block_type': 'ValueFunctionMidBlock1D', 'block_out_channels': (32, 64, 128, 256), 'layers_per_block': 1, 'downsample_each_block': True, 'sample_size': 65536, 'out_channels': 14, 'extra_in_channels': 0, 'time_embedding_type': 'positional', 'use_timestep_embedding': True, 'flip_sin_to_cos': False, 'freq_shift': 1, 'norm_num_groups': 8, 'act_fn': 'mish', } snake_case_ = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' ) snake_case_ = model snake_case_ = UNetaDModel(**UpperCAmelCase ) print(f'length of state dict: {len(state_dict.keys() )}' ) print(f'length of value function dict: {len(hf_value_function.state_dict().keys() )}' ) snake_case_ = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ = state_dict.pop(UpperCAmelCase ) hf_value_function.load_state_dict(UpperCAmelCase ) torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' ) with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f: json.dump(UpperCAmelCase , UpperCAmelCase ) if __name__ == "__main__": unet(32) # unet(128) value_function()
69
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ : Optional[Any] = logging.getLogger() def a__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Dict ) -> int: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = os.path.join(lowercase, '''all_results.json''' ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: _UpperCamelCase = json.load(lowercase ) else: raise ValueError(F"""can't find {path}""" ) return results def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod def snake_case__ ( cls : Optional[int] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Tuple ) -> int: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : int ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) ) @slow def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCAmelCase__ 
) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
324
0
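Both converters in the hopper-medium-v2 script above remap weights purely by key order: `dict(zip(...))` pairs the source state dict's keys with the target model's keys positionally, then pops and reassigns them one by one. A minimal illustration with made-up plain dicts; note this only works when both sides enumerate parameters in the same order:

src_state_dict = {"blocks.0.w": 1, "blocks.0.b": 2, "head.w": 3}
target_keys = ["down.0.weight", "down.0.bias", "out.weight"]
mapping = dict(zip(src_state_dict.keys(), target_keys))
converted = {mapping[k]: v for k, v in src_state_dict.items()}
print(converted)  # {'down.0.weight': 1, 'down.0.bias': 2, 'out.weight': 3}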
'''simple docstring'''
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    # Relax every edge leaving `v`; record a meeting point whenever `nxt`
    # has already been settled by the search running in the other direction.
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard bidirectional stopping rule: once the settled costs of the
        # two frontiers meet or exceed the best path found so far, stop.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
70
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the plaintext and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell so a 5x5 table (25 letters) suffices.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"

    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
324
0
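A quick sanity check for the bidirectional Dijkstra module above, using the sample graphs defined at the bottom of that file (parameter names as in the de-duplicated signature): the cheapest E -> F route is E -> G -> F with total weight 3.

print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3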
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class __A ( a , a ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 128 , lowerCamelCase__ = 256 , lowerCamelCase__ = 2_000.0 , lowerCamelCase__ = 768 , lowerCamelCase__ = 12 , lowerCamelCase__ = 12 , lowerCamelCase__ = 64 , lowerCamelCase__ = 2048 , lowerCamelCase__ = 0.1 , ): """simple docstring""" super().__init__() __UpperCamelCase : str =nn.Sequential( nn.Linear(lowerCamelCase__ , d_model * 4 , bias=lowerCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase__ ) , nn.SiLU() , ) __UpperCamelCase : Any =nn.Embedding(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : Optional[int] =False __UpperCamelCase : Dict =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =nn.Dropout(p=lowerCamelCase__ ) __UpperCamelCase : int =nn.ModuleList() for lyr_num in range(lowerCamelCase__ ): # FiLM conditional T5 decoder __UpperCamelCase : Tuple =DecoderLayer(d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) self.decoders.append(lowerCamelCase__ ) __UpperCamelCase : List[str] =TaLayerNorm(lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Dropout(p=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] =decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __UpperCamelCase : Tuple =get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) __UpperCamelCase : Union[str, Any] =self.conditioning_emb(lowerCamelCase__ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __UpperCamelCase : Optional[int] =decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __UpperCamelCase : List[Any] =torch.broadcast_to( torch.arange(lowerCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) __UpperCamelCase : Any =self.position_encoding(lowerCamelCase__ ) __UpperCamelCase : Any =self.continuous_inputs_projection(lowerCamelCase__ ) inputs += position_encodings __UpperCamelCase : Optional[Any] =self.dropout(lowerCamelCase__ ) # decoder: No padding present. __UpperCamelCase : Any =torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
__UpperCamelCase : str =[(x, self.encoder_decoder_mask(lowerCamelCase__ , lowerCamelCase__ )) for x, y in encodings_and_masks] # cross attend style: concat encodings __UpperCamelCase : Any =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) __UpperCamelCase : List[Any] =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: __UpperCamelCase : Optional[Any] =lyr( lowerCamelCase__ , conditioning_emb=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )[0] __UpperCamelCase : Tuple =self.decoder_norm(lowerCamelCase__ ) __UpperCamelCase : Tuple =self.post_dropout(lowerCamelCase__ ) __UpperCamelCase : Dict =self.spec_out(lowerCamelCase__ ) return spec_out class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-6 ): """simple docstring""" super().__init__() __UpperCamelCase : Optional[int] =nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCamelCase__ , d_kv=lowerCamelCase__ , num_heads=lowerCamelCase__ , dropout_rate=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ , layer_norm_epsilon=lowerCamelCase__ ) ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : Optional[Any] =self.layer[0]( lowerCamelCase__ , conditioning_emb=lowerCamelCase__ , attention_mask=lowerCamelCase__ , ) if encoder_hidden_states is not None: __UpperCamelCase : str =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) __UpperCamelCase : int =self.layer[1]( lowerCamelCase__ , key_value_states=lowerCamelCase__ , attention_mask=lowerCamelCase__ , ) # Apply Film Conditional Feed Forward layer __UpperCamelCase : Union[str, Any] =self.layer[-1](lowerCamelCase__ , lowerCamelCase__ ) return (hidden_states,) class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Any =TaLayerNorm(lowerCamelCase__ ) __UpperCamelCase : Tuple =TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =Attention(query_dim=lowerCamelCase__ , heads=lowerCamelCase__ , dim_head=lowerCamelCase__ , out_bias=lowerCamelCase__ , scale_qk=lowerCamelCase__ ) __UpperCamelCase : List[str] =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : int =self.layer_norm(lowerCamelCase__ ) if conditioning_emb is not None: __UpperCamelCase : str =self.FiLMLayer(lowerCamelCase__ , lowerCamelCase__ ) # Self-attention block __UpperCamelCase : int =self.attention(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_states + self.dropout(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , 
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Tuple =Attention(query_dim=lowerCamelCase__ , heads=lowerCamelCase__ , dim_head=lowerCamelCase__ , out_bias=lowerCamelCase__ , scale_qk=lowerCamelCase__ ) __UpperCamelCase : List[str] =TaLayerNorm(lowerCamelCase__ , eps=lowerCamelCase__ ) __UpperCamelCase : Any =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" __UpperCamelCase : List[Any] =self.layer_norm(lowerCamelCase__ ) __UpperCamelCase : Tuple =self.attention( lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , ) __UpperCamelCase : int =hidden_states + self.dropout(lowerCamelCase__ ) return layer_output class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Any =TaDenseGatedActDense(d_model=lowerCamelCase__ , d_ff=lowerCamelCase__ , dropout_rate=lowerCamelCase__ ) __UpperCamelCase : Tuple =TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase__ ) __UpperCamelCase : Dict =TaLayerNorm(lowerCamelCase__ , eps=lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =nn.Dropout(lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" __UpperCamelCase : List[str] =self.layer_norm(lowerCamelCase__ ) if conditioning_emb is not None: __UpperCamelCase : List[str] =self.film(lowerCamelCase__ , lowerCamelCase__ ) __UpperCamelCase : Union[str, Any] =self.DenseReluDense(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_states + self.dropout(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : List[str] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : List[Any] =nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) __UpperCamelCase : List[Any] =nn.Dropout(lowerCamelCase__ ) __UpperCamelCase : Any =NewGELUActivation() def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Any =self.act(self.wi_a(lowerCamelCase__ ) ) __UpperCamelCase : Union[str, Any] =self.wi_a(lowerCamelCase__ ) __UpperCamelCase : Any =hidden_gelu * hidden_linear __UpperCamelCase : List[str] =self.dropout(lowerCamelCase__ ) __UpperCamelCase : Optional[Any] =self.wo(lowerCamelCase__ ) return hidden_states class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=1E-6 ): """simple docstring""" super().__init__() __UpperCamelCase : Any =nn.Parameter(torch.ones(lowerCamelCase__ ) ) __UpperCamelCase : str =eps def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Any =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase__ ) __UpperCamelCase : Dict =hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __UpperCamelCase : List[str] =hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class __A ( 
nn.Module ): """simple docstring""" def __lowercase ( self , lowerCamelCase__ ): """simple docstring""" return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowerCamelCase__ , 3.0 )) )) class __A ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" super().__init__() __UpperCamelCase : Optional[int] =nn.Linear(lowerCamelCase__ , out_features * 2 , bias=lowerCamelCase__ ) def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" __UpperCamelCase : Optional[int] =self.scale_bias(lowerCamelCase__ ) __UpperCamelCase , __UpperCamelCase : Optional[int] =torch.chunk(lowerCamelCase__ , 2 , -1 ) __UpperCamelCase : int =x * (1 + scale) + shift return x
71
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Any = {'vocab_file': 'spiece.model'} lowercase__ : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } lowercase__ : Optional[Any] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['input_ids', 'attention_mask'] _snake_case : List[int] = [] def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> Tuple: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ ) return token def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__ ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase__ ) _UpperCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str: '''simple docstring''' _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ ) _UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase = [] _UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) _UpperCamelCase = [] sub_texts.append(lowerCAmelCase__ ) else: current_sub_text.append(lowerCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) ) else: _UpperCamelCase = ''''''.join(lowerCAmelCase__ ) _UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ ) return clean_text else: return text def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase__ , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (out_vocab_file,) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] _UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
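The _decode override above mimics the Rust tokenizer by deleting the space that the join inserts before [MASK] and [SEP]; a standalone illustration of that regex step (the token strings are made up):

import re

sub_texts = ["I saw a", "[MASK]", "yesterday", "[SEP]"]
text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
print(text)  # -> 'I saw a[MASK] yesterday[SEP]'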
324
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __snake_case ( _lowercase , unittest.TestCase): snake_case__ : Any = KandinskyVaaImgaImgPipeline snake_case__ : int = ["image_embeds", "negative_image_embeds", "image"] snake_case__ : Optional[int] = [ "image_embeds", "negative_image_embeds", "image", ] snake_case__ : Tuple = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] snake_case__ : Any = False @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" return 3_2 @property def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return 3_2 @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self.time_input_dim @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return 1_0_0 @property def SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : List[str] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } _lowerCamelCase : List[Any] = UNetaDConditionModel(**__lowerCAmelCase ) return model @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) _lowerCamelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs ) return model def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : List[Any] = self.dummy_unet _lowerCamelCase : Union[str, Any] = self.dummy_movq _lowerCamelCase : Any = { '''num_train_timesteps''': 1_0_0_0, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } 
_lowerCamelCase : Optional[Any] = DDIMScheduler(**__lowerCAmelCase ) _lowerCamelCase : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple=0 ): """simple docstring""" _lowerCamelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCamelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowerCAmelCase ) # create init_image _lowerCamelCase : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) _lowerCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCamelCase : int = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) ) if str(__lowerCAmelCase ).startswith('''mps''' ): _lowerCamelCase : Any = torch.manual_seed(__lowerCAmelCase ) else: _lowerCamelCase : Union[str, Any] = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) _lowerCamelCase : int = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 6_4, '''width''': 6_4, '''num_inference_steps''': 1_0, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = '''cpu''' _lowerCamelCase : Dict = self.get_dummy_components() _lowerCamelCase : Dict = self.pipeline_class(**__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = pipe.to(__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) ) _lowerCamelCase : Optional[int] = output.images _lowerCamelCase : str = pipe( **self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0] _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] _lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) _lowerCamelCase : Optional[int] = np.array( [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class __snake_case ( unittest.TestCase): def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" _lowerCamelCase : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) _lowerCamelCase : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) _lowerCamelCase : int = '''A red cartoon frog, 4k''' _lowerCamelCase : List[str] = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = 
KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) _lowerCamelCase : Tuple = pipeline.to(__lowerCAmelCase ) pipeline.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : str = torch.Generator(device='''cpu''' ).manual_seed(0 ) _lowerCamelCase , _lowerCamelCase : Any = pipe_prior( __lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() _lowerCamelCase : Union[str, Any] = pipeline( image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , ) _lowerCamelCase : int = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
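The img2img tests above pass strength=0.2 together with num_inference_steps; a hedged sketch of how diffusers-style img2img pipelines usually map strength to the denoising steps actually run (values illustrative, not traced through this pipeline):

num_inference_steps = 100
strength = 0.2
# keep only the last `strength` fraction of the schedule
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(t_start, init_timestep)  # 80 20 -> skip 80 steps, denoise the final 20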
72
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : Optional[int] = { 'MIT/ast-finetuned-audioset-10-10-0.4593': ( 'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json' ), } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : int = 'audio-spectrogram-transformer' def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=768 , lowerCAmelCase__ : Optional[Any]=12 , lowerCAmelCase__ : int=12 , lowerCAmelCase__ : int=3072 , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-1_2 , lowerCAmelCase__ : Any=16 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=10 , lowerCAmelCase__ : int=10 , lowerCAmelCase__ : Dict=1024 , lowerCAmelCase__ : Optional[int]=128 , **lowerCAmelCase__ : List[Any] , ) -> Tuple: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = patch_size _UpperCamelCase = qkv_bias _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins
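A hedged back-of-the-envelope check of what the config above implies: AST extracts overlapping patches with frequency_stride/time_stride, so the patch grid follows the standard convolution output formula (this mirrors how the AST embedding layer computes its shape, using the default patch_size of 16).

patch_size, frequency_stride, time_stride = 16, 10, 10
num_mel_bins, max_length = 128, 1024
freq_dim = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
time_dim = (max_length - patch_size) // time_stride + 1         # 101
print(freq_dim * time_dim)  # 1212 patches, before any [CLS]/distillation tokens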
324
0
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a =logging.get_logger(__name__) a ={"""vocab_file""": """spiece.model"""} a ={ """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } a ={ """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } a ="""▁""" class A_ ( SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES _UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[Any]="<unk>" ,SCREAMING_SNAKE_CASE__ : Any="[SEP]" ,SCREAMING_SNAKE_CASE__ : Optional[int]="<pad>" ,SCREAMING_SNAKE_CASE__ : Any="[CLS]" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="[MASK]" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
__lowerCamelCase : Dict = ( AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__ ,normalized=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token ) __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,) __lowerCamelCase : Any = do_lower_case __lowerCamelCase : Union[str, Any] = remove_space __lowerCamelCase : Tuple = keep_accents __lowerCamelCase : Dict = vocab_file __lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(SCREAMING_SNAKE_CASE__) @property def lowerCAmelCase ( self : Optional[Any]): return len(self.sp_model) def lowerCAmelCase ( self : Optional[Any]): __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Union[str, Any]): __lowerCamelCase : str = self.__dict__.copy() __lowerCamelCase : Tuple = None return state def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : List[str] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs'): __lowerCamelCase : List[str] = {} __lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]): if self.remove_space: __lowerCamelCase : Dict = ' '.join(inputs.strip().split()) else: __lowerCamelCase : Optional[Any] = inputs __lowerCamelCase : Tuple = outputs.replace('``' ,'"').replace('\'\'' ,'"') if not self.keep_accents: __lowerCamelCase : List[str] = unicodedata.normalize('NFKD' ,SCREAMING_SNAKE_CASE__) __lowerCamelCase : str = ''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__)]) if self.do_lower_case: __lowerCamelCase : Optional[Any] = outputs.lower() return outputs def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str): __lowerCamelCase : Tuple = self.preprocess_text(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__) __lowerCamelCase : Tuple = [] for piece in pieces: if len(SCREAMING_SNAKE_CASE__) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): __lowerCamelCase : int = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ ,'')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: __lowerCamelCase : Union[str, Any] = cur_pieces[1:] else: __lowerCamelCase : Dict = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(SCREAMING_SNAKE_CASE__) else: new_pieces.append(SCREAMING_SNAKE_CASE__) return new_pieces def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]): return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Any): return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__) def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : int): __lowerCamelCase : Optional[Any] = [] 
__lowerCamelCase : int = '' __lowerCamelCase : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token __lowerCamelCase : List[Any] = True __lowerCamelCase : Any = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__) __lowerCamelCase : List[Any] = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) return out_string.strip() def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : Union[str, Any] = [self.sep_token_id] __lowerCamelCase : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None): __lowerCamelCase : Tuple = [self.sep_token_id] __lowerCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None): if not os.path.isdir(SCREAMING_SNAKE_CASE__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __lowerCamelCase : List[str] = os.path.join( SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi: __lowerCamelCase : str = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__) return (out_vocab_file,)
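A standalone, readable version of the preprocess_text logic above (whitespace collapsing, LaTeX-style quote normalization, optional accent stripping and lowercasing):

import unicodedata

def preprocess_text(text, remove_space=True, keep_accents=False, do_lower_case=True):
    out = " ".join(text.strip().split()) if remove_space else text
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    return out.lower() if do_lower_case else out

print(preprocess_text("  Héllo ``world''  "))  # -> 'hello "world"'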
73
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase__ : Union[str, Any] = logging.get_logger(__name__) # General docstring lowercase__ : Dict = 'ResNetConfig' # Base docstring lowercase__ : str = 'microsoft/resnet-50' lowercase__ : Tuple = [1, 20_48, 7, 7] # Image classification docstring lowercase__ : Optional[Any] = 'microsoft/resnet-50' lowercase__ : List[str] = 'tiger cat' lowercase__ : List[Any] = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad( lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCamelCase = config.num_channels def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.pooler(lowerCAmelCase__ ) return embedding class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( 
nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = out_channels // reduction _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = input for layer in self.layers: _UpperCamelCase = layer(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCAmelCase__ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = ResNetConfig _snake_case : Union[str, Any] = 'resnet' _snake_case : Optional[int] = 'pixel_values' _snake_case : int = True def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' if isinstance(lowerCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = value lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder( lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config.num_labels _UpperCamelCase = ResNetModel(lowerCAmelCase__ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCAmelCase__ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) super()._init_backbone(lowerCAmelCase__ ) _UpperCamelCase = [config.embedding_size] + config.hidden_sizes _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput: '''simple 
docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
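A compact, deobfuscated sketch of the basic residual layer above: the shortcut is a strided 1x1 conv plus batch norm only when the shape changes, otherwise identity; the block output is activation(body(x) + shortcut(x)). Names are illustrative.

import torch
from torch import nn

class BasicBlock(nn.Module):
    def __init__(self, cin: int, cout: int, stride: int = 1):
        super().__init__()
        self.shortcut = (
            nn.Sequential(nn.Conv2d(cin, cout, 1, stride=stride, bias=False), nn.BatchNorm2d(cout))
            if cin != cout or stride != 1
            else nn.Identity()
        )
        self.body = nn.Sequential(
            nn.Conv2d(cin, cout, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(cout),
            nn.ReLU(),
            nn.Conv2d(cout, cout, 3, padding=1, bias=False),
            nn.BatchNorm2d(cout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.body(x) + self.shortcut(x))

print(BasicBlock(32, 64, stride=2)(torch.randn(1, 32, 56, 56)).shape)  # (1, 64, 28, 28)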
324
0
"""simple docstring""" import argparse import struct import unittest class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : bytes ) -> None: A = data # Initialize hash values A = [ 0X6_A_0_9_E_6_6_7, 0XB_B_6_7_A_E_8_5, 0X3_C_6_E_F_3_7_2, 0XA_5_4_F_F_5_3_A, 0X5_1_0_E_5_2_7_F, 0X9_B_0_5_6_8_8_C, 0X1_F_8_3_D_9_A_B, 0X5_B_E_0_C_D_1_9, ] # Initialize round constants A = [ 0X4_2_8_A_2_F_9_8, 0X7_1_3_7_4_4_9_1, 0XB_5_C_0_F_B_C_F, 0XE_9_B_5_D_B_A_5, 0X3_9_5_6_C_2_5_B, 0X5_9_F_1_1_1_F_1, 0X9_2_3_F_8_2_A_4, 0XA_B_1_C_5_E_D_5, 0XD_8_0_7_A_A_9_8, 0X1_2_8_3_5_B_0_1, 0X2_4_3_1_8_5_B_E, 0X5_5_0_C_7_D_C_3, 0X7_2_B_E_5_D_7_4, 0X8_0_D_E_B_1_F_E, 0X9_B_D_C_0_6_A_7, 0XC_1_9_B_F_1_7_4, 0XE_4_9_B_6_9_C_1, 0XE_F_B_E_4_7_8_6, 0X0_F_C_1_9_D_C_6, 0X2_4_0_C_A_1_C_C, 0X2_D_E_9_2_C_6_F, 0X4_A_7_4_8_4_A_A, 0X5_C_B_0_A_9_D_C, 0X7_6_F_9_8_8_D_A, 0X9_8_3_E_5_1_5_2, 0XA_8_3_1_C_6_6_D, 0XB_0_0_3_2_7_C_8, 0XB_F_5_9_7_F_C_7, 0XC_6_E_0_0_B_F_3, 0XD_5_A_7_9_1_4_7, 0X0_6_C_A_6_3_5_1, 0X1_4_2_9_2_9_6_7, 0X2_7_B_7_0_A_8_5, 0X2_E_1_B_2_1_3_8, 0X4_D_2_C_6_D_F_C, 0X5_3_3_8_0_D_1_3, 0X6_5_0_A_7_3_5_4, 0X7_6_6_A_0_A_B_B, 0X8_1_C_2_C_9_2_E, 0X9_2_7_2_2_C_8_5, 0XA_2_B_F_E_8_A_1, 0XA_8_1_A_6_6_4_B, 0XC_2_4_B_8_B_7_0, 0XC_7_6_C_5_1_A_3, 0XD_1_9_2_E_8_1_9, 0XD_6_9_9_0_6_2_4, 0XF_4_0_E_3_5_8_5, 0X1_0_6_A_A_0_7_0, 0X1_9_A_4_C_1_1_6, 0X1_E_3_7_6_C_0_8, 0X2_7_4_8_7_7_4_C, 0X3_4_B_0_B_C_B_5, 0X3_9_1_C_0_C_B_3, 0X4_E_D_8_A_A_4_A, 0X5_B_9_C_C_A_4_F, 0X6_8_2_E_6_F_F_3, 0X7_4_8_F_8_2_E_E, 0X7_8_A_5_6_3_6_F, 0X8_4_C_8_7_8_1_4, 0X8_C_C_7_0_2_0_8, 0X9_0_B_E_F_F_F_A, 0XA_4_5_0_6_C_E_B, 0XB_E_F_9_A_3_F_7, 0XC_6_7_1_7_8_F_2, ] A = self.preprocessing(self.data ) self.final_hash() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : bytes ) -> bytes: A = B'\x80' + (B'\x00' * (63 - (len(A_ ) + 8) % 64)) A = struct.pack('>Q' ,(len(A_ ) * 8) ) return data + padding + big_endian_integer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None: # Convert into blocks of 64 bytes A = [ self.preprocessed_data[x : x + 64] for x in range(0 ,len(self.preprocessed_data ) ,64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A = list(struct.unpack('>16L' ,A_ ) ) # add 48 0-ed integers words += [0] * 48 A , A , A , A , A , A , A , A = self.hashes for index in range(0 ,64 ): if index > 15: # modify the zero-ed indexes at the end of the array A = ( self.ror(words[index - 15] ,7 ) ^ self.ror(words[index - 15] ,18 ) ^ (words[index - 15] >> 3) ) A = ( self.ror(words[index - 2] ,17 ) ^ self.ror(words[index - 2] ,19 ) ^ (words[index - 2] >> 10) ) A = ( words[index - 16] + sa + words[index - 7] + sa ) % 0X1_0_0_0_0_0_0_0_0 # Compression A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 ) A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g) A = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0X1_0_0_0_0_0_0_0_0 A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 ) A = (a & b) ^ (a & c) ^ (b & c) A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0 A , A , A , A , A , A , A , A = ( g, f, e, ((d + tempa) % 0X1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0), ) A = [a, b, c, d, e, f, g, h] # Modify final values A = [ ((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int: return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations) class lowerCAmelCase_ ( unittest.TestCase ): '''simple 
docstring''' def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> None: import hashlib A = bytes('Test String' ,'utf-8' ) self.assertEqual(SHAaaa(A ).hash ,hashlib.sha256(A ).hexdigest() ) def _snake_case ( ): import doctest doctest.testmod() parser = argparse.ArgumentParser() parser.add_argument( '-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument( '-f' , '--file' , dest='input_file' , help='Hash contents of a file' ) args = parser.parse_args() A = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: A = f.read() else: A = bytes(A , 'utf-8' ) print(SHAaaa(A ).hash ) if __name__ == "__main__": _snake_case()
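A standalone check of the preprocessing step above: SHA-256 pads the message with 0x80, zero bytes, and the 64-bit big-endian bit length so the padded result is a multiple of 64 bytes.

import struct

data = b"Test String"
padding = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
padded = data + padding + struct.pack(">Q", len(data) * 8)
assert len(padded) % 64 == 0  # 11 + 45 + 8 == 64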
74
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def a__ ( lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" if isinstance(lowercase, collections.abc.Iterable ): return x return (x, x) @require_flax class __lowerCAmelCase : """simple docstring""" def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple ) -> int: '''simple docstring''' pass def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' pass def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str: '''simple docstring''' _UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , 
lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = after_output[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple: '''simple docstring''' pt_model.to(lowerCAmelCase__ ) pt_model.eval() # prepare inputs _UpperCamelCase = inputs_dict _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple() _UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ ) _UpperCamelCase = 
fx_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ ) pt_model_loaded.to(lowerCAmelCase__ ) pt_model_loaded.eval() with torch.no_grad(): _UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 ) def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ ) _UpperCamelCase = fx_state self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase__ ) @is_pt_flax_cross_test def snake_case__ ( self : int ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = config_inputs_dict.pop('''vision_config''' ) _UpperCamelCase = config_inputs_dict.pop('''text_config''' ) _UpperCamelCase = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs() _UpperCamelCase = 
model_a(**lowerCAmelCase__ ) _UpperCamelCase = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model_a(**lowerCAmelCase__ ) _UpperCamelCase = after_outputs[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-5 ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxViTModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : str ) -> Tuple: '''simple docstring''' _UpperCamelCase = FlaxViTModelTester(self ) _UpperCamelCase = FlaxBertModelTester(self ) _UpperCamelCase = vit_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : List[str] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModelTester(self ) _UpperCamelCase = 
FlaxBertModelTester(self ) _UpperCamelCase = clip_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCamelCase = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' ) _UpperCamelCase = model(**lowerCAmelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _UpperCamelCase = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
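# Hedged sketch (not part of the test files above) of the public API these
# tests exercise: composing a Flax dual encoder from two separately pretrained
# towers. The checkpoint ids and the *_from_pt flags mirror the tiny test
# models already used in get_pretrained_model_and_inputs above; running this
# downloads those checkpoints.
from transformers import FlaxVisionTextDualEncoderModel

dual_encoder = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    'hf-internal-testing/tiny-random-vit',
    'hf-internal-testing/tiny-bert',
    vision_from_pt=True,
    text_from_pt=True,
)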
'''simple docstring'''
from math import log2


def a_(number: int) -> int:
    """Return the zero-based index of the rightmost set bit of `number`."""
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
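# Illustrative sanity check of a_ above (hand-computed values, not from the
# original file): 36 == 0b100100, so its rightmost set bit sits at index 2.
assert a_(36) == 2
assert a_(8) == 3  # 8 == 0b1000
assert a_(0) == 0  # 0 is defined to map to 0 here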
'''simple docstring''' import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_choices def snake_case__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_attention_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self : Union[str, Any] ) -> str: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( 
FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, ) if is_flax_available() else () ) def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxAlbertModelTester(self ) @slow def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0] _UpperCamelCase = (1, 11, 768) self.assertEqual(output.shape , lowerCAmelCase__ ) _UpperCamelCase = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; `key` is the default key used when none is given."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt", key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt", key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt", key))

# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt", key), key))

# if (crypt.encrypt_file("test.txt", key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out", key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
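# Illustrative round trip (not part of the original file): XOR with the same
# key is an involution, so decrypting an encryption recovers the input.
# `_cipher` is a throwaway demo name.
_cipher = XORCipher(key=67)
assert _cipher.decrypt_string(_cipher.encrypt_string("hallo welt")) == "hallo welt"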
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std def snake_case__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Tuple = LevitImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = LevitImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' pass def 
snake_case__ ( self : Dict ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
"""simple docstring""" def a_ ( _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Union[str, Any] = abs(_lowerCAmelCase ) lowercase__ : Union[str, Any] = 0 while n > 0: res += n % 10 n //= 10 return res def a_ ( _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Optional[Any] = abs(_lowerCAmelCase ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def a_ ( _lowerCAmelCase : int ): '''simple docstring''' return sum(int(_lowerCAmelCase ) for c in str(abs(_lowerCAmelCase ) ) ) def a_ ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(_lowerCAmelCase : Callable , _lowerCAmelCase : int ) -> None: lowercase__ : str = f"""{func.__name__}({value})""" lowercase__ : Union[str, Any] = timeit(f"""__main__.{call}""" , setup='import __main__' ) print(f"""{call:56} = {func(_lowerCAmelCase )} -- {timing:.4f} seconds""" ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(_lowerCAmelCase , _lowerCAmelCase ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
'''simple docstring'''
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
"""simple docstring""" def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): return round(float(moles / volume ) * nfactor ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): return round(float((moles * 0.0_8_2_1 * temperature) / (volume) ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): return round(float((moles * 0.0_8_2_1 * temperature) / (pressure) ) ) def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ): return round(float((pressure * volume) / (0.0_8_2_1 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring'''
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ['attention', 'attn'],
    ['encoder_attention', 'encoder_attn'],
    ['q_lin', 'q_proj'],
    ['k_lin', 'k_proj'],
    ['v_lin', 'v_proj'],
    ['out_lin', 'out_proj'],
    ['norm_embeddings', 'layernorm_embedding'],
    ['position_embeddings', 'embed_positions'],
    ['embeddings', 'embed_tokens'],
    ['ffn.lin', 'fc'],
]


def rename_state_dict_key(k: str) -> str:
    """Map a ParlAI state-dict key to the corresponding HF Blenderbot key."""
    if k == 'embeddings.weight':
        return 'shared.weight'

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith('encoder'):
        k = k.replace('.attn', '.self_attn')
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'final_layer_norm')
    elif k.startswith('decoder'):
        k = k.replace('norm1', 'self_attn_layer_norm')
        k = k.replace('norm2', 'encoder_attn_layer_norm')
        k = k.replace('norm3', 'final_layer_norm')
    return k


def rename_layernorm_keys(sd: dict) -> None:
    keys = [
        'model.encoder.layernorm_embedding.weight',
        'model.encoder.layernorm_embedding.bias',
        'model.decoder.layernorm_embedding.weight',
        'model.decoder.layernorm_embedding.bias',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('layernorm_embedding', 'layer_norm')
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['START']


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str, config_json_path: str) -> None:
    model = torch.load(checkpoint_path, map_location='cpu')
    sd = model['model']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
    parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
    parser.add_argument(
        '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
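# Hypothetical invocation of the converter above; the file names below are the
# script's own argparse defaults and placeholders, not verified paths:
#
#     python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#         --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json
#
# or directly from Python:
#     convert_parlai_checkpoint('blenderbot-model.bin', 'hf_blenderbot', 'blenderbot-3b-config.json')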
'''simple docstring'''
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
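# Illustrative usage (not in the original file): slowsort sorts in place and
# returns None. `_data` is a throwaway demo name.
_data = [9, 3, 7, 1]
slowsort(_data)
assert _data == [1, 3, 7, 9]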
'''simple docstring'''
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
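# Illustrative only (the package path -- transformers.models.deprecated.mctct in
# recent releases -- is inferred from the `....utils` relative import above):
# with _LazyModule installed in sys.modules, importing the package is cheap and
# the torch-backed symbols load on first attribute access, e.g.
#
#     from transformers.models.deprecated.mctct import MCTCTConfig  # real import happens here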
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _UpperCamelCase ( ) -> List[Any]: '''simple docstring''' UpperCamelCase__ = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg" UpperCamelCase__ = Image.open(requests.get(__A , stream=__A ).raw ).convert("RGB" ) return image def _UpperCamelCase ( __A ) -> List[str]: '''simple docstring''' UpperCamelCase__ = [] # fmt: off # vision encoder rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") ) rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") ) rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") ) rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") ) rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") ) rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") ) rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") ) # fmt: on return rename_keys def 
_UpperCamelCase ( __A , __A , __A ) -> int: '''simple docstring''' UpperCamelCase__ = dct.pop(__A ) UpperCamelCase__ = val def _UpperCamelCase ( __A , __A ) -> Optional[Any]: '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases UpperCamelCase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) UpperCamelCase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict UpperCamelCase__ = torch.cat((q_bias, torch.zeros_like(__A , requires_grad=__A ), v_bias) ) UpperCamelCase__ = qkv_bias def _UpperCamelCase ( __A ) -> Dict: '''simple docstring''' UpperCamelCase__ = 364 if "coco" in model_name else 224 UpperCamelCase__ = InstructBlipVisionConfig(image_size=__A ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: UpperCamelCase__ = TaConfig.from_pretrained("google/flan-t5-xl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCamelCase__ = TaConfig.from_pretrained("google/flan-t5-xxl" , dense_act_fn="gelu" , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: UpperCamelCase__ = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf" , vocab_size=32001 ).to_dict() elif "vicuna-13b" in model_name: UpperCamelCase__ = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf" , vocab_size=32001 ).to_dict() else: raise ValueError("Model name not supported" ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 UpperCamelCase__ = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict() UpperCamelCase__ = InstructBlipConfig(vision_config=__A , text_config=__A , qformer_config=__A ) return config, image_size @torch.no_grad() def _UpperCamelCase ( __A , __A=None , __A=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" , truncation_side="left" ) qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} ) if "t5" in model_name: UpperCamelCase__ = TaTokenizerFast.from_pretrained("google/flan-t5-xl" , truncation_side="left" ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) UpperCamelCase__ = LlamaTokenizerFast.from_pretrained( "huggyllama/llama-7b" , truncation_side="left" , bos_token="</s>" , unk_token="</s>" ) tokenizer.add_special_tokens({"pad_token": "[PAD]"} ) UpperCamelCase__ , UpperCamelCase__ = get_blipa_config(__A ) UpperCamelCase__ = InstructBlipForConditionalGeneration(__A ).eval() UpperCamelCase__ = { "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"), "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"), "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"), "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"), } UpperCamelCase__ , UpperCamelCase__ = model_name_to_original[model_name] # load original model print("Loading original model..." 
) UpperCamelCase__ = "cuda:1" if torch.cuda.is_available() else "cpu" UpperCamelCase__ = "cuda:2" if torch.cuda.is_available() else "cpu" UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = load_model_and_preprocess( name=__A , model_type=__A , is_eval=__A , device=__A ) original_model.eval() print("Done!" ) # update state dict keys UpperCamelCase__ = original_model.state_dict() UpperCamelCase__ = create_rename_keys(__A ) for src, dest in rename_keys: rename_key(__A , __A , __A ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCamelCase__ = state_dict.pop(__A ) if key.startswith("Qformer.bert" ): UpperCamelCase__ = key.replace("Qformer.bert" , "qformer" ) if "attention.self" in key: UpperCamelCase__ = key.replace("self" , "attention" ) if "llm_proj" in key: UpperCamelCase__ = key.replace("llm_proj" , "language_projection" ) if "t5_proj" in key: UpperCamelCase__ = key.replace("t5_proj" , "language_projection" ) if key.startswith("llm_model" ): UpperCamelCase__ = key.replace("llm_model" , "language_model" ) if key.startswith("t5" ): UpperCamelCase__ = key.replace("t5" , "language" ) UpperCamelCase__ = val # read in qv biases read_in_q_v_bias(__A , __A ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(__A , strict=__A ) UpperCamelCase__ = load_demo_image() UpperCamelCase__ = "What is unusual about this image?" # create processor UpperCamelCase__ = BlipImageProcessor( size={"height": image_size, "width": image_size} , image_mean=__A , image_std=__A ) UpperCamelCase__ = InstructBlipProcessor( image_processor=__A , tokenizer=__A , qformer_tokenizer=__A , ) UpperCamelCase__ = processor(images=__A , text=__A , return_tensors="pt" ).to(__A ) # make sure processor creates exact same pixel values UpperCamelCase__ = vis_processors["eval"](__A ).unsqueeze(0 ).to(__A ) UpperCamelCase__ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __A ) original_model.to(__A ) hf_model.to(__A ) with torch.no_grad(): if "vicuna" in model_name: UpperCamelCase__ = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits UpperCamelCase__ = hf_model(**__A ).logits else: UpperCamelCase__ = original_model( {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits UpperCamelCase__ = tokenizer("\n" , return_tensors="pt" ).input_ids.to(__A ) UpperCamelCase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) UpperCamelCase__ = hf_model(**__A , labels=__A ).logits print("First values of original logits:" , original_logits[0, :3, :3] ) print("First values of HF logits:" , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape UpperCamelCase__ = 1E-4 if "vicuna" in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , __A , atol=__A ) print("Looks ok!" ) print("Generating with original model..." ) UpperCamelCase__ = original_model.generate({"image": original_pixel_values, "prompt": prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print("Generating with HF model..." ) UpperCamelCase__ = hf_model.generate( **__A , do_sample=__A , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
UpperCamelCase__ = 2 print("Original generation:" , __A ) UpperCamelCase__ = processor.batch_decode(__A , skip_special_tokens=__A ) UpperCamelCase__ = [text.strip() for text in output_text] print("HF generation:" , __A ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__A ) hf_model.save_pretrained(__A ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": a__ : int = argparse.ArgumentParser() a__ : int = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) a__ : int = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring''' import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging lowercase__ : Any = logging.get_logger(__name__) class __lowerCAmelCase : """simple docstring""" _snake_case : List[str] = None @experimental def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int: """simple docstring""" if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]: """simple docstring""" _UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase ) _UpperCamelCase = [] # We organize the splits ourselve (contiguous splits) for index in range(lowercase ): _UpperCamelCase = len(lowercase ) // num_proc _UpperCamelCase = len(lowercase ) % num_proc _UpperCamelCase = div * index + min(lowercase, lowercase ) _UpperCamelCase = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( F"""Error dividing inputs iterable among processes. """ F"""Total number of objects {len(lowercase )}, """ F"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) _UpperCamelCase , _UpperCamelCase = None, None if not disable_tqdm: _UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool: _UpperCamelCase = pool.map(lowercase, lowercase ) logger.info(F"""Finished {num_proc} processes""" ) _UpperCamelCase = [obj for proc_res in mapped for obj in proc_res] logger.info(F"""Unpacked {len(lowercase )} objects""" ) return mapped def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any: """simple docstring""" import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ): return joblib.Parallel()( joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def a__ ( lowercase : str ) -> Optional[int]: """simple docstring""" _UpperCamelCase = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: _UpperCamelCase = None
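# Illustrative usage of the backend-switch context manager defined above
# (upstream `datasets` exposes it as `datasets.parallel.parallel_backend`; the
# spark backend additionally needs `pip install joblibspark`). This is an
# assumption about the public API, not part of the original file:
#
#     from datasets.parallel import parallel_backend
#
#     with parallel_backend('spark'):
#         ds = ds.map(tokenize_fn, num_proc=2)  # work is dispatched via joblib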
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = StableDiffusionDiffEditPipeline __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} __lowerCAmelCase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess __lowerCAmelCase = frozenset([] ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: torch.manual_seed(0 ) a =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__A , ) a =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) a =DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_zero=__A , ) torch.manual_seed(0 ) a =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) a =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) a =CLIPTextModel(__A ) a =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) a ={ '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> str: a =floats_tensor((1, 16, 16) , rng=random.Random(__A ) ).to(__A ) a =floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith('''mps''' ): a =torch.manual_seed(__A ) else: a =torch.Generator(device=__A ).manual_seed(__A ) a ={ '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> Optional[Any]: a =floats_tensor((1, 3, 32, 32) , 
rng=random.Random(__A ) ).to(__A ) a =image.cpu().permute(0 , 2 , 3 , 1 )[0] a =Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ) if str(__A ).startswith('''mps''' ): a =torch.manual_seed(__A ) else: a =torch.Generator(device=__A ).manual_seed(__A ) a ={ '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> str: a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) a =image.cpu().permute(0 , 2 , 3 , 1 )[0] a =Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ) if str(__A ).startswith('''mps''' ): a =torch.manual_seed(__A ) else: a =torch.Generator(device=__A ).manual_seed(__A ) a ={ '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self ) -> List[str]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return a =self.get_dummy_components() a =self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(__A , __A , __A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) a =self.get_dummy_inputs(__A ) a =pipe(**__A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(__A ) a =self.pipeline_class.from_pretrained(__A ) pipe_loaded.to(__A ) pipe_loaded.set_progress_bar_config(disable=__A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(__A , __A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) a =self.get_dummy_inputs(__A ) a =pipe_loaded(**__A )[0] a =np.abs(output - output_loaded ).max() self.assertLess(__A , 1E-4 ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: a ='''cpu''' a =self.get_dummy_components() a =self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) a =self.get_dummy_mask_inputs(__A ) a =pipe.generate_mask(**__A ) a =mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) a =np.array([0] * 9 ) a =np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(__A , 1E-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def SCREAMING_SNAKE_CASE ( self ) -> Any: a ='''cpu''' a =self.get_dummy_components() a =self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) a =self.get_dummy_inversion_inputs(__A ) a =pipe.invert(**__A ).images a =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) a =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) a =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__A , 1E-3 ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: a ='''cpu''' a =self.get_dummy_components() a ={'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} a =DPMSolverMultistepScheduler(**__A ) a 
=DPMSolverMultistepInverseScheduler(**__A ) a =self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) a =self.get_dummy_inversion_inputs(__A ) a =pipe.invert(**__A ).images a =image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) a =np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) a =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__A , 1E-3 ) @require_torch_gpu @slow class __A ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def SCREAMING_SNAKE_CASE ( cls ) -> List[Any]: a =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) a =raw_image.convert('''RGB''' ).resize((768, 768) ) a =raw_image def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a =torch.manual_seed(0 ) a =StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=__A , torch_dtype=torch.floataa ) a =DDIMScheduler.from_config(pipe.scheduler.config ) a =DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) a ='''a bowl of fruit''' a ='''a bowl of pears''' a =pipe.generate_mask( image=self.raw_image , source_prompt=__A , target_prompt=__A , generator=__A , ) a =pipe.invert( prompt=__A , image=self.raw_image , inpaint_strength=0.7 , generator=__A ).latents a =pipe( prompt=__A , mask_image=__A , image_latents=__A , generator=__A , negative_prompt=__A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] a =( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =torch.manual_seed(0 ) a =StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=__A , torch_dtype=torch.floataa ) a =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) a =DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__A ) a ='''a bowl of fruit''' a ='''a bowl of pears''' a =pipe.generate_mask( image=self.raw_image , source_prompt=__A , target_prompt=__A , generator=__A , ) a =pipe.invert( prompt=__A , image=self.raw_image , inpaint_strength=0.7 , generator=__A , num_inference_steps=25 , ).latents a =pipe( prompt=__A , mask_image=__A , image_latents=__A , generator=__A , negative_prompt=__A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] a =( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[Any]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=1 / 255 , lowerCAmelCase__ : Tuple=True , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> str: '''simple docstring''' if not batched: _UpperCamelCase = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): _UpperCamelCase , _UpperCamelCase = image.size else: _UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2] if w < h: _UpperCamelCase = int(self.size['''shortest_edge'''] * h / w ) _UpperCamelCase = self.size['''shortest_edge'''] elif w > h: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = self.size['''shortest_edge'''] else: _UpperCamelCase = [] for image in image_inputs: _UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = DeformableDetrImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' 
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) _UpperCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' pass def snake_case__ ( self : int ) -> Any: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : str ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) _UpperCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them _UpperCamelCase = DeformableDetrImageProcessor() _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) ) @slow def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) 
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} _UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify masks _UpperCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
324
0
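A minimal standalone sketch of the shortest-edge resize rule that the image-processing test row above verifies; the helper name and the assert values are illustrative, not part of the test suite.

def expected_resize(height: int, width: int, shortest_edge: int = 18) -> tuple:
    # Scale so the shorter side equals `shortest_edge`, preserving aspect ratio,
    # mirroring the branching in get_expected_values above.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

assert expected_resize(400, 200) == (36, 18)
assert expected_resize(200, 400) == (18, 36)
assert expected_resize(300, 300) == (18, 18)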
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) A__ = { """configuration_clip""": [ """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CLIPConfig""", """CLIPOnnxConfig""", """CLIPTextConfig""", """CLIPVisionConfig""", ], """processing_clip""": ["""CLIPProcessor"""], """tokenization_clip""": ["""CLIPTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""CLIPTokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = ["""CLIPFeatureExtractor"""] A__ = ["""CLIPImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """CLIPModel""", """CLIPPreTrainedModel""", """CLIPTextModel""", """CLIPTextModelWithProjection""", """CLIPVisionModel""", """CLIPVisionModelWithProjection""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCLIPModel""", """TFCLIPPreTrainedModel""", """TFCLIPTextModel""", """TFCLIPVisionModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ = [ """FlaxCLIPModel""", """FlaxCLIPPreTrainedModel""", """FlaxCLIPTextModel""", """FlaxCLIPTextPreTrainedModel""", """FlaxCLIPVisionModel""", """FlaxCLIPVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys A__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
82
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: lowercase__ : str = None lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} lowercase__ : int = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } lowercase__ : Optional[int] = { 'google/rembert': 2_56, } lowercase__ : str = '▁' class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : str = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Dict = RemBertTokenizer def __init__( self : List[Any] , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Union[str, Any]=False , lowerCAmelCase__ : List[Any]="[CLS]" , lowerCAmelCase__ : str="[SEP]" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : Optional[int]="[SEP]" , lowerCAmelCase__ : List[str]="<pad>" , lowerCAmelCase__ : str="[CLS]" , lowerCAmelCase__ : List[Any]="[MASK]" , **lowerCAmelCase__ : List[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , ) _UpperCamelCase = do_lower_case _UpperCamelCase = remove_space _UpperCamelCase = keep_accents _UpperCamelCase = vocab_file _UpperCamelCase = False if not self.vocab_file else True def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple 
docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__ ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(lowerCAmelCase__ ) ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ): copyfile(self.vocab_file , lowerCAmelCase__ ) return (out_vocab_file,)
324
0
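A simplified sketch of the lazy-import idea behind the CLIP __init__ above, built on importlib only; transformers' _LazyModule is more featureful, so treat this as an illustration of the pattern rather than its implementation.

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    # Map each exported name to its submodule and import it on first attribute access.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(module, attr)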
"""XLM-ProphetNet model configuration."""
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
83
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : str = logging.get_logger(__name__) lowercase__ : Any = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Tuple = 'deformable_detr' _snake_case : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[Any] , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : List[str]=300 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : Tuple=6 , lowerCAmelCase__ : Union[str, Any]=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : List[Any]=6 , lowerCAmelCase__ : Tuple=1024 , lowerCAmelCase__ : List[Any]=8 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any="relu" , lowerCAmelCase__ : int=256 , lowerCAmelCase__ : Dict=0.1 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : int=False , lowerCAmelCase__ : str="sine" , lowerCAmelCase__ : List[Any]="resnet50" , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=False , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Optional[int]=300 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Dict=5 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Optional[Any]=1 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : int=0.25 , lowerCAmelCase__ : Any=False , **lowerCAmelCase__ : Optional[Any] , ) -> str: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = backbone_config.get('''model_type''' ) _UpperCamelCase = CONFIG_MAPPING[backbone_model_type] _UpperCamelCase = config_class.from_dict(lowerCAmelCase__ ) _UpperCamelCase = use_timm_backbone _UpperCamelCase = backbone_config _UpperCamelCase = num_channels _UpperCamelCase = num_queries _UpperCamelCase = max_position_embeddings _UpperCamelCase = d_model _UpperCamelCase = encoder_ffn_dim _UpperCamelCase = encoder_layers _UpperCamelCase = encoder_attention_heads _UpperCamelCase = decoder_ffn_dim _UpperCamelCase = decoder_layers _UpperCamelCase = decoder_attention_heads _UpperCamelCase = dropout _UpperCamelCase = attention_dropout _UpperCamelCase = activation_dropout _UpperCamelCase = activation_function _UpperCamelCase = init_std _UpperCamelCase = init_xavier_std _UpperCamelCase = encoder_layerdrop _UpperCamelCase = auxiliary_loss _UpperCamelCase = position_embedding_type _UpperCamelCase = backbone _UpperCamelCase = use_pretrained_backbone _UpperCamelCase = dilation # deformable attributes _UpperCamelCase = num_feature_levels _UpperCamelCase = encoder_n_points _UpperCamelCase = decoder_n_points _UpperCamelCase = two_stage _UpperCamelCase = two_stage_num_proposals _UpperCamelCase = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _UpperCamelCase = class_cost _UpperCamelCase = bbox_cost _UpperCamelCase = giou_cost # Loss coefficients _UpperCamelCase = mask_loss_coefficient _UpperCamelCase = dice_loss_coefficient _UpperCamelCase = bbox_loss_coefficient _UpperCamelCase = giou_loss_coefficient _UpperCamelCase = eos_coefficient _UpperCamelCase = focal_alpha _UpperCamelCase = disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def snake_case__ ( self : int ) -> int: '''simple docstring''' return self.d_model def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _UpperCamelCase = self.backbone_config.to_dict() _UpperCamelCase = self.__class__.model_type return output
324
0
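A toy sketch of the attribute_map indirection both configuration classes above declare: reads of a canonical attribute name are redirected to the model-specific one. PretrainedConfig's real mechanism differs in detail; this only demonstrates the mapping idea.

class ConfigSketch:
    attribute_map = {"num_attention_heads": "num_encoder_attention_heads"}

    def __init__(self, num_encoder_attention_heads: int = 16):
        self.num_encoder_attention_heads = num_encoder_attention_heads

    def __getattr__(self, name):
        # Invoked only when normal lookup fails, so mapped reads land here.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


assert ConfigSketch().num_attention_heads == 16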
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ :str = prime_factors(lowercase__ ) if is_square_free(lowercase__ ): return -1 if len(lowercase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
84
"""Break a Caesar cipher by scoring every shift with a chi-squared statistic."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253,
            "e": 0.11162, "f": 0.02228, "g": 0.02015, "h": 0.06094,
            "i": 0.07546, "j": 0.00153, "k": 0.01292, "l": 0.04025,
            "m": 0.02406, "n": 0.06749, "o": 0.07507, "p": 0.01929,
            "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
            "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150,
            "y": 0.01994, "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
324
0
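Hand-verifiable spot checks for the Möbius function above: mu(1)=1 (empty factorization), mu(2)=-1, mu(4)=0 (square factor), mu(6)=1, mu(30)=-1.

for n, expected in [(1, 1), (2, -1), (4, 0), (6, 1), (30, -1)]:
    assert mobius(n) == expected, (n, expected)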
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) class _snake_case ( lowercase_ ): lowerCAmelCase_ : List[Any] = ["input_values", "padding_mask"] def __init__( self , a__ = 1 , a__ = 24_000 , a__ = 0.0 , a__ = None , a__ = None , **a__ , ) -> Any: '''simple docstring''' super().__init__(feature_size=a__ , sampling_rate=a__ , padding_value=a__ , **a__ ) snake_case_ = chunk_length_s snake_case_ = overlap @property def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , a__ , a__ = None , a__ = False , a__ = None , a__ = None , a__ = None , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if padding and truncation: raise ValueError("Both padding and truncation were set. Make sure you only set one." 
) elif padding is None: # by default let's pad the inputs snake_case_ = True snake_case_ = bool( isinstance(a__ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ = [np.asarray(a__ , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(a__ , np.ndarray ): snake_case_ = np.asarray(a__ , dtype=np.floataa ) elif isinstance(a__ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ = [np.asarray(a__ ).T] # verify inputs are valid for idx, example in enumerate(a__ ): if example.ndim > 2: raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' ) snake_case_ = None snake_case_ = BatchFeature({"input_values": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ = min(array.shape[0] for array in raw_audio ) snake_case_ = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ = max(array.shape[0] for array in raw_audio ) snake_case_ = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ = "max_length" else: snake_case_ = input_values # normal padding on batch if padded_inputs is None: snake_case_ = self.pad( a__ , max_length=a__ , truncation=a__ , padding=a__ , return_attention_mask=a__ , ) if padding: snake_case_ = padded_inputs.pop("attention_mask" ) snake_case_ = [] for example in padded_inputs.pop("input_values" ): if self.feature_size == 1: snake_case_ = example[..., None] input_values.append(example.T ) snake_case_ = input_values if return_tensors is not None: snake_case_ = padded_inputs.convert_to_tensors(a__ ) return padded_inputs
85
"""Intro sort: a hybrid of insertion sort, heap sort and quicksort."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
324
0
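A quick correctness check for the intro sort above, comparing against Python's built-in sorted on random data.

import random

data = [random.randint(-100, 100) for _ in range(200)]
assert sort(list(data)) == sorted(data)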
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase__ = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
"""Deduplicate identical initializer tensors in an ONNX model file."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
324
0
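A toy illustration of the deduplication bookkeeping in the ONNX script above, on synthetic numpy arrays instead of TensorProtos: identical tensors keep one canonical copy plus a name remap.

import numpy as np

tensors = {"w1": np.ones((2, 2)), "w2": np.ones((2, 2)), "b": np.zeros(2)}
canonical, remap = {}, {}
for name, arr in tensors.items():
    for kept_name, kept in canonical.items():
        if arr.shape == kept.shape and np.array_equal(arr, kept):
            remap[name] = kept_name  # later duplicate points at the first copy
            break
    else:
        canonical[name] = arr
assert remap == {"w2": "w1"}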
import argparse import os import re UpperCamelCase = '''src/transformers''' # Pattern that looks at the indentation in a line. UpperCamelCase = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCamelCase = re.compile(R'''\[([^\]]+)\]''') def lowercase_ ( _lowerCamelCase : int): lowercase__ : str = _re_indent.search(_lowerCamelCase) return "" if search is None else search.groups()[0] def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple="" , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None): lowercase__ : Optional[Any] = 0 lowercase__ : Optional[int] = code.split("\n") if start_prompt is not None: while not lines[index].startswith(_lowerCamelCase): index += 1 lowercase__ : str = ["\n".join(lines[:index])] else: lowercase__ : List[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowercase__ : Any = [lines[index]] index += 1 while index < len(_lowerCamelCase) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase)): if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: if len(_lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): current_block.append(lines[index]) blocks.append("\n".join(_lowerCamelCase)) if index < len(_lowerCamelCase) - 1: lowercase__ : List[str] = [lines[index + 1]] index += 1 else: lowercase__ : List[str] = [] else: blocks.append("\n".join(_lowerCamelCase)) lowercase__ : List[str] = [lines[index]] else: current_block.append(lines[index]) index += 1 # Adds current block if it's nonempty. if len(_lowerCamelCase) > 0: blocks.append("\n".join(_lowerCamelCase)) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowerCamelCase): blocks.append("\n".join(lines[index:])) return blocks def lowercase_ ( _lowerCamelCase : Optional[int]): def _inner(_lowerCamelCase : str): return key(_lowerCamelCase).lower().replace("_" , "") return _inner def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None): # If no key is provided, we use a noop. def noop(_lowerCamelCase : str): return x if key is None: lowercase__ : Any = noop # Constants are all uppercase, they go first. lowercase__ : Tuple = [obj for obj in objects if key(_lowerCamelCase).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowercase__ : str = [obj for obj in objects if key(_lowerCamelCase)[0].isupper() and not key(_lowerCamelCase).isupper()] # Functions begin with a lowercase, they go last. lowercase__ : Any = [obj for obj in objects if not key(_lowerCamelCase)[0].isupper()] lowercase__ : Dict = ignore_underscore(_lowerCamelCase) return sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): # This inner function sort imports between [ ]. 
def _replace(_lowerCamelCase : List[Any]): lowercase__ : Optional[Any] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowercase__ : Optional[int] = [part.strip().replace("\"" , "") for part in imports.split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: lowercase__ : Optional[int] = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) + "]" lowercase__ : List[Any] = import_statement.split("\n") if len(_lowerCamelCase) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowercase__ : Dict = 2 if lines[1].strip() == "[" else 1 lowercase__ : Optional[Any] = [(i, _re_strip_line.search(_lowerCamelCase).groups()[0]) for i, line in enumerate(lines[idx:-idx])] lowercase__ : Any = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase: x[1]) lowercase__ : List[str] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) elif len(_lowerCamelCase) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1]) is not None: lowercase__ : Any = _re_bracket_content.sub(_replace , lines[1]) else: lowercase__ : List[Any] = [part.strip().replace("\"" , "") for part in lines[1].split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: lowercase__ : Optional[Any] = keys[:-1] lowercase__ : Optional[Any] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) return "\n".join(_lowerCamelCase) else: # Finally we have to deal with imports fitting on one line lowercase__ : Any = _re_bracket_content.sub(_replace , _lowerCamelCase) return import_statement def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=True): with open(_lowerCamelCase , encoding="utf-8") as f: lowercase__ : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowercase__ : List[Any] = split_code_in_indented_blocks( _lowerCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:") # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowerCamelCase) - 1): # Check if the block contains some `_import_structure`s thingy to sort. lowercase__ : Optional[int] = main_blocks[block_idx] lowercase__ : Any = block.split("\n") # Get to the start of the imports. lowercase__ : int = 0 while line_idx < len(_lowerCamelCase) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowercase__ : List[str] = len(_lowerCamelCase) else: line_idx += 1 if line_idx >= len(_lowerCamelCase): continue # Ignore beginning and last line: they don't contain anything. lowercase__ : str = "\n".join(block_lines[line_idx:-1]) lowercase__ : Optional[Any] = get_indent(block_lines[1]) # Slit the internal block into blocks of indent level 1. 
lowercase__ : List[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase) # We have two categories of import key: list or _import_structure[key].append/extend lowercase__ : Optional[int] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowercase__ : Dict = [(pattern.search(_lowerCamelCase).groups()[0] if pattern.search(_lowerCamelCase) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowercase__ : Optional[int] = [(i, key) for i, key in enumerate(_lowerCamelCase) if key is not None] lowercase__ : List[Any] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase: x[1])] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowercase__ : Tuple = 0 lowercase__ : Tuple = [] for i in range(len(_lowerCamelCase)): if keys[i] is None: reorderded_blocks.append(internal_blocks[i]) else: lowercase__ : int = sort_objects_in_import(internal_blocks[sorted_indices[count]]) reorderded_blocks.append(_lowerCamelCase) count += 1 # And we put our main block back together with its first and last line. lowercase__ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]]) if code != "\n".join(_lowerCamelCase): if check_only: return True else: print(f'''Overwriting {file}.''') with open(_lowerCamelCase , "w" , encoding="utf-8") as f: f.write("\n".join(_lowerCamelCase)) def lowercase_ ( _lowerCamelCase : List[Any]=True): lowercase__ : Optional[int] = [] for root, _, files in os.walk(_lowerCamelCase): if "__init__.py" in files: lowercase__ : Optional[int] = sort_imports(os.path.join(_lowerCamelCase , "__init__.py") , check_only=_lowerCamelCase) if result: lowercase__ : List[str] = [os.path.join(_lowerCamelCase , "__init__.py")] if len(_lowerCamelCase) > 0: raise ValueError(f'''Would overwrite {len(_lowerCamelCase)} files, run `make style`.''') if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') UpperCamelCase = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
87
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase__ : List[Any] = 25_00_04 lowercase__ : str = 25_00_20 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MBartTokenizer _snake_case : Tuple = MBartTokenizerFast _snake_case : List[str] = True _snake_case : Optional[Any] = True def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = 
tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : Dict = 'facebook/mbart-large-en-ro' _snake_case : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _snake_case : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod 
def snake_case__ ( cls : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _UpperCamelCase = 1 return cls def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def snake_case__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , lowerCAmelCase__ ) _UpperCamelCase = 10 _UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ ) @require_torch def snake_case__ ( self : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , 
lowerCAmelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' ) _UpperCamelCase = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' ) _UpperCamelCase = targets['''input_ids'''] _UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def snake_case__ ( self : Tuple ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 250004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, } , )
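# --- Usage sketch (editorial addition, not part of the test file above): the same
# en->ro MBart setup the integration tests exercise, via the public tokenizer API.
from transformers import MBartTokenizer

sketch_tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
sketch_batch = sketch_tokenizer(
    " UN Chief Says There Is No Military Solution in Syria", return_tensors="pt"
)
# Source ids end with [eos, en_XX]; generation is then started from the target
# language code, e.g. forced_bos_token_id=sketch_tokenizer.lang_code_to_id["ro_RO"].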
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __lowerCAmelCase : Any = _symbol_database.Default() __lowerCAmelCase : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) __lowerCAmelCase : Tuple = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: __lowerCAmelCase : Any = None __lowerCAmelCase : Any = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __lowerCAmelCase : str = 45 __lowerCAmelCase : List[str] = 1581 __lowerCAmelCase : Optional[int] = 1517 __lowerCAmelCase : List[Any] = 1570 __lowerCAmelCase : List[Any] = 1584 __lowerCAmelCase : Optional[int] = 1793 __lowerCAmelCase : List[str] = 1795 __lowerCAmelCase : str = 1916 __lowerCAmelCase : int = 1864 __lowerCAmelCase : List[Any] = 1905 __lowerCAmelCase : Optional[int] = 1919 __lowerCAmelCase : Dict = 2429 __lowerCAmelCase : Optional[Any] = 2208 __lowerCAmelCase : Optional[int] = 2418 __lowerCAmelCase : List[Any] = 2323 __lowerCAmelCase : List[Any] = 2407 # @@protoc_insertion_point(module_scope)
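# --- Usage sketch (editorial addition): the generated module can deserialize a raw
# SentencePiece model file with the standard protobuf API. This assumes the file is
# importable as ``sentencepiece_model_pb2`` (the name registered above) and that
# ``spm_path`` points at an existing ``*.model`` file.
import sentencepiece_model_pb2

def inspect_spm_model(spm_path: str) -> None:
    proto = sentencepiece_model_pb2.ModelProto()
    with open(spm_path, "rb") as f:
        proto.ParseFromString(f.read())  # standard protobuf deserialization
    # vocabulary size from the trainer spec, plus a peek at the first pieces
    print(proto.trainer_spec.vocab_size, [p.piece for p in proto.pieces[:5]])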
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase__ : str = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Union[str, Any] = ['pixel_values'] def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 256} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ ) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray: '''simple docstring''' return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]: '''simple docstring''' _UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase__ ): _UpperCamelCase = target_sizes.numpy() _UpperCamelCase = [] for idx in range(len(lowerCAmelCase__ ) ): _UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ ) _UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase__ ) else: _UpperCamelCase = logits.argmax(dim=1 ) _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
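# --- Toy sketch (editorial addition, plain numpy, independent of the mangled class
# name above): the tensor math behind the ``preprocess`` pipeline, in the same order:
# rescale, normalize (IMAGENET_STANDARD_MEAN/STD are [0.5, 0.5, 0.5]), channel-first.
import numpy as np

toy_image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
toy = toy_image.astype(np.float32) * (1 / 255)      # rescale
toy_mean = np.array([0.5, 0.5, 0.5])
toy_std = np.array([0.5, 0.5, 0.5])
toy = (toy - toy_mean) / toy_std                    # normalize
toy = np.transpose(toy, (2, 0, 1))                  # HWC -> CHW (ChannelDimension.FIRST)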
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class __magic_name__ : def __init__( self : Dict ): _a : list[Any] = [] _a : int = 0 _a : int = 0 def __lowercase ( self : Optional[Any] ): return self.head == self.tail def __lowercase ( self : List[Any] ,_UpperCAmelCase : Any ): self.data.append(_UpperCAmelCase ) _a : Dict = self.tail + 1 def __lowercase ( self : int ): _a : str = self.data[self.head] _a : List[Any] = self.head + 1 return ret def __lowercase ( self : Dict ): return self.tail - self.head def __lowercase ( self : List[Any] ): print(self.data ) print('**************' ) print(self.data[self.head : self.tail] ) class __magic_name__ : def __init__( self : List[str] ,_UpperCAmelCase : Any ): _a : Dict = data _a : MyNode | None = None _a : MyNode | None = None _a : int = 1 def __lowercase ( self : Dict ): return self.data def __lowercase ( self : Tuple ): return self.left def __lowercase ( self : Optional[Any] ): return self.right def __lowercase ( self : Optional[Any] ): return self.height def __lowercase ( self : str ,_UpperCAmelCase : Any ): _a : str = data def __lowercase ( self : str ,_UpperCAmelCase : MyNode | None ): _a : Tuple = node def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : MyNode | None ): _a : Tuple = node def __lowercase ( self : Dict ,_UpperCAmelCase : int ): _a : Dict = height def __lowerCamelCase ( lowerCAmelCase_ ) -> int: if node is None: return 0 return node.get_height() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int: if a > b: return a return b def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode: print('left rotation node:' , node.get_data() ) _a : str = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(lowerCAmelCase_ ) _a : Any = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase_ ) _a : str = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCAmelCase_ ) return ret def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode: print('right rotation node:' , node.get_data() ) _a : Optional[Any] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(lowerCAmelCase_ ) _a : Tuple = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase_ ) _a : List[str] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCAmelCase_ ) return ret def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode: _a : Union[str, Any] = node.get_left() assert left_child is not None node.set_left(left_rotation(lowerCAmelCase_ ) ) return right_rotation(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ ) -> MyNode: _a : str = node.get_right() assert right_child is not None node.set_right(right_rotation(lowerCAmelCase_ ) ) return left_rotation(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None: if node is None: return MyNode(lowerCAmelCase_ ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , lowerCAmelCase_ ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected _a : Any = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child _a : Union[str, Any] = right_rotation(lowerCAmelCase_ ) else: _a : List[str] = lr_rotation(lowerCAmelCase_ ) else: node.set_right(insert_node(node.get_right() , 
lowerCAmelCase_ ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: _a : str = node.get_right() assert right_child is not None if data < right_child.get_data(): _a : Any = rl_rotation(lowerCAmelCase_ ) else: _a : str = left_rotation(lowerCAmelCase_ ) _a : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase_ ) return node def __lowerCamelCase ( lowerCAmelCase_ ) -> Any: while True: _a : Tuple = root.get_right() if right_child is None: break _a : Any = right_child return root.get_data() def __lowerCamelCase ( lowerCAmelCase_ ) -> Any: while True: _a : Any = root.get_left() if left_child is None: break _a : Optional[int] = left_child return root.get_data() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> MyNode | None: _a : Optional[int] = root.get_left() _a : Optional[int] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: _a : int = get_left_most(lowerCAmelCase_ ) root.set_data(lowerCAmelCase_ ) root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) ) elif left_child is not None: _a : Optional[int] = left_child elif right_child is not None: _a : Optional[int] = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(lowerCAmelCase_ , lowerCAmelCase_ ) ) if get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): _a : Union[str, Any] = left_rotation(lowerCAmelCase_ ) else: _a : Dict = rl_rotation(lowerCAmelCase_ ) elif get_height(lowerCAmelCase_ ) - get_height(lowerCAmelCase_ ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): _a : Any = right_rotation(lowerCAmelCase_ ) else: _a : int = lr_rotation(lowerCAmelCase_ ) _a : Tuple = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(lowerCAmelCase_ ) return root class __magic_name__ : def __init__( self : Tuple ): _a : MyNode | None = None def __lowercase ( self : List[Any] ): return get_height(self.root ) def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ): print('insert:' + str(_UpperCAmelCase ) ) _a : List[Any] = insert_node(self.root ,_UpperCAmelCase ) def __lowercase ( self : Any ,_UpperCAmelCase : Any ): print('delete:' + str(_UpperCAmelCase ) ) if self.root is None: print('Tree is empty!' 
) return _a : Union[str, Any] = del_node(self.root ,_UpperCAmelCase ) def __str__( self : int ,): # a level-order traversal gives a more intuitive look at the tree _a : Optional[Any] = '' _a : Tuple = MyQueue() q.push(self.root ) _a : Dict = self.get_height() if layer == 0: return output _a : int = 0 while not q.is_empty(): _a : List[str] = q.pop() _a : Optional[int] = ' ' * int(math.pow(2 ,layer - 1 ) ) output += space if node is None: output += "*" q.push(_UpperCAmelCase ) q.push(_UpperCAmelCase ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space _a : Dict = cnt + 1 for i in range(100 ): if cnt == math.pow(2 ,_UpperCAmelCase ) - 1: _a : List[Any] = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def _test( ) -> None: import doctest doctest.testmod() if __name__ == "__main__": _test() __lowerCAmelCase = AVLtree() __lowerCAmelCase = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
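# --- Worked example (editorial addition). The dump mangles definition names while
# call sites keep the originals, so this sketch assumes the original API spelled
# by the call sites above (AVLtree.insert backed by insert_node). Inserting 3, 1, 2
# triggers the left-right case: node 3 becomes left-heavy with a right-leaning left
# child, so lr_rotation (a left rotation on 1, then a right rotation on 3) runs.
demo_tree = AVLtree()
for value in (3, 1, 2):
    demo_tree.insert(value)  # the third insert rebalances via lr_rotation; 2 is now the root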
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : jnp.ndarray @flax_register_to_config class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : int = 3_2 _snake_case : int = 4 _snake_case : int = 4 _snake_case : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _snake_case : Union[bool, Tuple[bool]] = False _snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _snake_case : int = 2 _snake_case : Union[int, Tuple[int]] = 8 _snake_case : Optional[Union[int, Tuple[int]]] = None _snake_case : int = 1_2_8_0 _snake_case : float = 0.0 _snake_case : bool = False _snake_case : jnp.dtype = jnp.floataa _snake_case : bool = True _snake_case : int = 0 _snake_case : bool = False def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa ) _UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ ) _UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"] def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.block_out_channels _UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype ) _UpperCamelCase = self.only_cross_attention if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase = [] _UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = block_out_channels[i] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = down_blocks # mid _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCamelCase = [] _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = reversed_block_out_channels[i] _UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = output_channel _UpperCamelCase = up_blocks # out _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCamelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , jnp.ndarray ): _UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 ) _UpperCamelCase = self.time_proj(lowerCAmelCase__ ) _UpperCamelCase = self.time_embedding(lowerCAmelCase__ ) # 2. pre-process _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) _UpperCamelCase = self.conv_in(lowerCAmelCase__ ) # 3. down _UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( lowerCAmelCase__ , lowerCAmelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase = new_down_block_res_samples # 4. mid _UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = up_block( lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , ) else: _UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train ) # 6. post-process _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ ) _UpperCamelCase = nn.silu(lowerCAmelCase__ ) _UpperCamelCase = self.conv_out(lowerCAmelCase__ ) _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
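# --- Usage sketch (editorial addition): assuming the module above is diffusers'
# FlaxUNet2DConditionModel (this dump mangles the class name), one denoising
# forward pass with freshly initialized parameters looks like this.
import jax
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

sketch_unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4)
sketch_params = sketch_unet.init_weights(jax.random.PRNGKey(0))
noisy_latents = jnp.zeros((1, 4, 32, 32))    # (batch, channels, height, width)
timestep = jnp.array([10], dtype=jnp.int32)
text_embeddings = jnp.zeros((1, 77, 1280))   # cross_attention_dim defaults to 1280
unet_out = sketch_unet.apply({"params": sketch_params}, noisy_latents, timestep, text_embeddings)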
from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets __A = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n" __A = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n" __A = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n" def lowerCamelCase_ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> Optional[int]: """simple docstring""" return float((preds == labels).mean() ) def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Any: """simple docstring""" __lowerCamelCase = simple_accuracy(UpperCamelCase__ , UpperCamelCase__ ) __lowerCamelCase = float(fa_score(y_true=UpperCamelCase__ , y_pred=UpperCamelCase__ ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> Any: """simple docstring""" __lowerCamelCase = float(pearsonr(UpperCamelCase__ , UpperCamelCase__ )[0] ) __lowerCamelCase = float(spearmanr(UpperCamelCase__ , UpperCamelCase__ )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): """simple docstring""" def lowercase_ ( self ) -> Dict: '''simple docstring''' if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", 
"qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), 'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' , ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: '''simple docstring''' if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(lowerCamelCase__ , lowerCamelCase__ )} elif self.config_name == "stsb": return pearson_and_spearman(lowerCamelCase__ , lowerCamelCase__ ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(lowerCamelCase__ , lowerCamelCase__ ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["sst2", "mnli", "mnli_mismatched", "mnli_matched", ' '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase__ : List[str] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase__ : Dict = logging.getLogger() def a__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int: """simple docstring""" _UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: return json.load(lowercase ) raise ValueError(F"""can't find {path}""" ) lowercase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def snake_case__ ( self : Any ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_glue.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def snake_case__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_clm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def snake_case__ ( self : Tuple ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_summarization_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) 
self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def snake_case__ ( self : str ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_ta_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_ner.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_qa.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
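# --- Minimal illustration (editorial addition) of the argv-injection pattern used
# by every test above: patch.object swaps sys.argv so a script's main() parses the
# arguments we choose, without spawning a subprocess.
import sys as _sys
from unittest.mock import patch as _patch

def _demo_main() -> None:
    print(_sys.argv[1:])

with _patch.object(_sys, "argv", ["run_demo.py", "--learning_rate", "1e-4"]):
    _demo_main()  # prints ['--learning_rate', '1e-4']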
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _A (__a , __a ) -> Dict: """simple docstring""" assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _A (__a , __a , __a ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = ParquetDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def _A (__a , __a , __a ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} SCREAMING_SNAKE_CASE_ : List[str] = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE_ : int = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE_ : Dict = ParquetDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _A (__a , __a , __a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} SCREAMING_SNAKE_CASE_ : Optional[Any] = ParquetDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_parquet_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def _A (__a , __a , __a ) -> Dict: """simple docstring""" if issubclass(__a , __a ): SCREAMING_SNAKE_CASE_ : Optional[Any] = parquet_path elif issubclass(__a , __a ): SCREAMING_SNAKE_CASE_ : int = [parquet_path] SCREAMING_SNAKE_CASE_ : Dict = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} SCREAMING_SNAKE_CASE_ : Optional[Any] = ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_dataset(__a , __a ) def _A (__a , __a , __a=("train",) ) -> Any: """simple docstring""" assert isinstance(__a , __a ) for split in splits: SCREAMING_SNAKE_CASE_ : Any = dataset_dict[split] assert 
dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _A (__a , __a , __a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): SCREAMING_SNAKE_CASE_ : int = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def _A (__a , __a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} SCREAMING_SNAKE_CASE_ : List[Any] = features.copy() if features else default_expected_features SCREAMING_SNAKE_CASE_ : str = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) SCREAMING_SNAKE_CASE_ : Dict = ParquetDatasetReader({'''train''': parquet_path} , features=__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _A (__a , __a , __a ) -> Dict: """simple docstring""" if split: SCREAMING_SNAKE_CASE_ : Any = {split: parquet_path} else: SCREAMING_SNAKE_CASE_ : Dict = '''train''' SCREAMING_SNAKE_CASE_ : List[str] = {'''train''': parquet_path, '''test''': parquet_path} SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / '''cache''' SCREAMING_SNAKE_CASE_ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} SCREAMING_SNAKE_CASE_ : Tuple = ParquetDatasetReader(__a , cache_dir=__a ).read() _check_parquet_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _A (__a , __a ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 SCREAMING_SNAKE_CASE_ : Any = pq.ParquetFile(tmp_path / '''foo.parquet''' ) SCREAMING_SNAKE_CASE_ : Tuple = pf.read() assert dataset.data.table == output_table def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = str(shared_datadir / '''test_image_rgb.jpg''' ) SCREAMING_SNAKE_CASE_ : Optional[int] = {'''image''': [image_path]} SCREAMING_SNAKE_CASE_ : Dict = Features({'''image''': Image()} ) SCREAMING_SNAKE_CASE_ : Optional[int] = Dataset.from_dict(__a , features=__a ) SCREAMING_SNAKE_CASE_ : Any = ParquetDatasetWriter(__a , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 SCREAMING_SNAKE_CASE_ : Any = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features SCREAMING_SNAKE_CASE_ : str = ParquetDatasetReader(str(tmp_path / 
'''foo.parquet''' ) , streaming=__a ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def _A (__a , __a ) -> str: """simple docstring""" assert get_writer_batch_size(__a ) == expected
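# --- Public-API sketch (editorial addition): the round trip the tests above drive
# through the internal ParquetDatasetWriter/Reader, written with the Dataset
# convenience methods instead.
from datasets import Dataset as _Dataset

_ds = _Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3]})
_ds.to_parquet("round_trip.parquet")                 # writes the file, returns bytes written
_back = _Dataset.from_parquet("round_trip.parquet")
assert _back.column_names == ["col_1", "col_2"]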
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ : Optional[Any] = logging.getLogger() def a__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Dict ) -> int: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = os.path.join(lowercase, '''all_results.json''' ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: _UpperCamelCase = json.load(lowercase ) else: raise ValueError(F"""can't find {path}""" ) return results def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod def snake_case__ ( cls : Optional[int] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Tuple ) -> int: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : int ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) ) @slow def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCAmelCase__ 
) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
324
0
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
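# Quick sanity check for compute_ap above (this example is illustrative and
# not part of the original file): in a path graph 0 - 1 - 2, removing the
# middle vertex disconnects the two ends, so vertex 1 is the only
# articulation point and the call below is expected to print 1.
compute_ap({0: [1], 1: [0, 2], 2: [1]})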
92
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase the plaintext, strip non-letters, and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
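# A round-trip sketch for the cipher above ("greet" is an arbitrary example
# key, not part of the original file). Note that decode() returns the
# *prepared* text: uppercase, non-letters stripped, and an X inserted
# between the doubled E's of "tree".
secret = encode("Hide the gold in the tree stump", "greet")
print(decode(secret, "greet"))  # HIDETHEGOLDINTHETREXESTUMP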
324
0
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase__ ( lowerCamelCase_ , unittest.TestCase ): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): @property def _snake_case ( self ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[int] = ort.SessionOptions() lowercase_ : List[Any] = False return options def _snake_case ( self ): """simple docstring""" lowercase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowercase_ : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowercase_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowercase_ : Any = '''A red cat sitting on a park bench''' lowercase_ : Optional[Any] = np.random.RandomState(0 ) lowercase_ : str = pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=10 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ) lowercase_ : int = output.images lowercase_ : Dict = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) lowercase_ : int = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _snake_case ( self ): """simple docstring""" lowercase_ : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo.png''' ) lowercase_ : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' ) lowercase_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' ) lowercase_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained( '''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = '''A red cat sitting on a park bench''' lowercase_ : Dict = np.random.RandomState(0 ) lowercase_ : int = pipe( prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=20 , 
generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , ) lowercase_ : Optional[Any] = output.images lowercase_ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) lowercase_ : List[str] = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
93
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Any = {'vocab_file': 'spiece.model'} lowercase__ : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } lowercase__ : Optional[Any] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['input_ids', 'attention_mask'] _snake_case : List[int] = [] def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> Tuple: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ ) return token def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__ ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase__ ) _UpperCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str: '''simple docstring''' _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ ) _UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase = [] _UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) _UpperCamelCase = [] sub_texts.append(lowerCAmelCase__ ) else: current_sub_text.append(lowerCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) ) else: _UpperCamelCase = ''''''.join(lowerCAmelCase__ ) _UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ ) return clean_text else: return text def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase__ , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (out_vocab_file,) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] _UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
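# The class above appears to be transformers' BigBirdTokenizer (class and
# helper names in this dump are mangled). A hypothetical usage sketch under
# that assumption; "spiece.model" is a placeholder path to a local
# SentencePiece model file, not something shipped with this document.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer(vocab_file="spiece.model")
ids = tokenizer.encode("Paris is the capital of France.")
print(tokenizer.decode(ids))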
324
0
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging snake_case : List[Any] = logging.get_logger(__name__) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['input_values', 'padding_mask'] def __init__( self , _lowerCamelCase = 1 , _lowerCamelCase = 2_4000 , _lowerCamelCase = 0.0 , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): super().__init__(feature_size=_lowerCamelCase , sampling_rate=_lowerCamelCase , padding_value=_lowerCamelCase , **_lowerCamelCase ) a :Optional[int] = chunk_length_s a :Union[str, Any] = overlap @property def SCREAMING_SNAKE_CASE__ ( self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def SCREAMING_SNAKE_CASE__ ( self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs a :Optional[Any] = True a :Dict = bool( isinstance(_lowerCamelCase , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: a :Dict = [np.asarray(_lowerCamelCase , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_lowerCamelCase , np.ndarray ): a :Union[str, Any] = np.asarray(_lowerCamelCase , dtype=np.floataa ) elif isinstance(_lowerCamelCase , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): a :List[Any] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: a :Tuple = [np.asarray(_lowerCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_lowerCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) a :Tuple = None a :Dict = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: a :Dict = min(array.shape[0] for array in raw_audio ) a :List[Any] = int(np.floor(max_length / self.chunk_stride ) ) a :Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: a :str = max(array.shape[0] for array in raw_audio ) a :Union[str, Any] = int(np.ceil(max_length / self.chunk_stride ) ) a :List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length a :List[Any] = '''max_length''' else: a :str = input_values # normal padding on batch if padded_inputs is None: a :Any = self.pad( _lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase , padding=_lowerCamelCase , return_attention_mask=_lowerCamelCase , ) if padding: a :Tuple = padded_inputs.pop('''attention_mask''' ) a :Union[str, Any] = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: a :Optional[int] = example[..., None] input_values.append(example.T ) a :int = input_values if return_tensors is not None: a :Any = padded_inputs.convert_to_tensors(_lowerCamelCase ) return padded_inputs
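# The class above appears to be transformers' EncodecFeatureExtractor (the
# name in this dump is mangled). A minimal usage sketch under that
# assumption: one second of mono float32 audio at the default 24 kHz rate.
import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
audio = np.zeros(24_000, dtype=np.float32)  # 1 s of silence
inputs = extractor(audio, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)  # (batch, channels, samples)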
94
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
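# A minimal sketch of using the config above (in practice importable as
# `from transformers import ASTConfig`): instantiate with defaults and read
# back a couple of fields.
from transformers import ASTConfig

config = ASTConfig()
print(config.hidden_size)   # 768 by default
print(config.num_mel_bins)  # 128 by default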
324
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Any = logging.get_logger() @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : List[nn.Module] = field(default_factory=UpperCamelCase__) _lowercase : list = field(default_factory=UpperCamelCase__) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' a__ : Tuple =len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowerCAmelCase__ ) [x.remove() for x in self.handles] return self @property def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __lowerCAmelCase : _lowercase : nn.Module _lowercase : nn.Module _lowercase : int = 0 _lowercase : List = field(default_factory=UpperCamelCase__) _lowercase : List = field(default_factory=UpperCamelCase__) def __call__( self , lowerCAmelCase__ ) -> Tuple: '''simple docstring''' a__ : Any =Tracker(self.dest )(lowerCAmelCase__ ).parametrized a__ : List[str] =Tracker(self.src )(lowerCAmelCase__ ).parametrized a__ : Tuple =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) ) a__ : Any =list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while''' F''' destination module has {len(lowerCAmelCase__ )}.''' ) for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : ResNetConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" print(f'''Converting {name}...''' ) with torch.no_grad(): a__ : Tuple =timm.create_model(SCREAMING_SNAKE_CASE , pretrained=SCREAMING_SNAKE_CASE ).eval() a__ : str =ResNetForImageClassification(SCREAMING_SNAKE_CASE ).eval() a__ : List[str] =ModuleTransfer(src=SCREAMING_SNAKE_CASE , dest=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =torch.randn((1, 3, 224, 224) ) module_transfer(SCREAMING_SNAKE_CASE ) assert torch.allclose(from_model(SCREAMING_SNAKE_CASE ) , our_model(SCREAMING_SNAKE_CASE ).logits ), "The model logits don't match the original one." 
a__ : Union[str, Any] =f'''resnet{"-".join(name.split("resnet" ) )}''' print(SCREAMING_SNAKE_CASE ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE , ) # we can use the convnext one a__ : Optional[int] =AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE , ) print(f'''Pushed {checkpoint_name}''' ) def _A ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" a__ : int ="imagenet-1k-id2label.json" a__ : List[str] =1_000 a__ : List[Any] =(1, num_labels) a__ : str ="huggingface/label-files" a__ : Optional[int] =num_labels a__ : Union[str, Any] =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) ) a__ : str ={int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} a__ : int =idalabel a__ : Dict ={v: k for k, v in idalabel.items()} a__ : str =partial(SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE ) a__ : Optional[Any] ={ "resnet18": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet26": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet34": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ), "resnet50": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet101": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), "resnet152": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="bottleneck" ), } if model_name: convert_weight_and_push(SCREAMING_SNAKE_CASE , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return config, expected_shape if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
95
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase__ : Union[str, Any] = logging.get_logger(__name__) # General docstring lowercase__ : Dict = 'ResNetConfig' # Base docstring lowercase__ : str = 'microsoft/resnet-50' lowercase__ : Tuple = [1, 20_48, 7, 7] # Image classification docstring lowercase__ : Optional[Any] = 'microsoft/resnet-50' lowercase__ : List[str] = 'tiger cat' lowercase__ : List[Any] = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad( lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCamelCase = config.num_channels def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.pooler(lowerCAmelCase__ ) return embedding class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( 
nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = out_channels // reduction _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = input for layer in self.layers: _UpperCamelCase = layer(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCAmelCase__ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = ResNetConfig _snake_case : Union[str, Any] = 'resnet' _snake_case : Optional[int] = 'pixel_values' _snake_case : int = True def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' if isinstance(lowerCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = value lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder( lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config.num_labels _UpperCamelCase = ResNetModel(lowerCAmelCase__ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCAmelCase__ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) super()._init_backbone(lowerCAmelCase__ ) _UpperCamelCase = [config.embedding_size] + config.hidden_sizes _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput: '''simple 
docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
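# The classes above appear to be transformers' ResNet model family (names in
# this dump are mangled). An illustrative forward pass under that assumption,
# using the standard public names:
import torch
from transformers import ResNetConfig, ResNetForImageClassification

config = ResNetConfig(num_labels=10)
model = ResNetForImageClassification(config).eval()
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(pixel_values).logits
print(logits.shape)  # torch.Size([1, 10])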
324
0
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration lowercase__ = 5_0000 lowercase__ = 5000 lowercase__ , lowercase__ = os.path.split(__file__) lowercase__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def _snake_case ( lowercase__ , lowercase__ ): for i in range(lowercase__ ): _lowerCamelCase : Optional[Any] = dataset[i] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): for i in range(0 , len(lowercase__ ) , lowercase__ ): _lowerCamelCase : str = dataset[i : i + batch_size] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): with dataset.formatted_as(type=lowercase__ ): for i in range(lowercase__ ): _lowerCamelCase : Optional[Any] = dataset[i] @get_duration def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ): with dataset.formatted_as(type=lowercase__ ): for i in range(0 , lowercase__ , lowercase__ ): _lowerCamelCase : Tuple = dataset[i : i + batch_size] def _snake_case ( ): _lowerCamelCase : List[str] = {'num examples': SPEED_TEST_N_EXAMPLES} _lowerCamelCase : str = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] _lowerCamelCase : Dict = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) _lowerCamelCase : List[Any] = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) _lowerCamelCase : List[Any] = generate_example_dataset( os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(lowercase__ ) ) _lowerCamelCase : int = func(lowercase__ , **lowercase__ ) print('shuffling dataset' ) _lowerCamelCase : Dict = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(lowercase__ ) ) _lowerCamelCase : List[str] = func( lowercase__ , **lowercase__ ) with open(lowercase__ , 'wb' ) as f: f.write(json.dumps(lowercase__ ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
96
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def a__ ( lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" if isinstance(lowercase, collections.abc.Iterable ): return x return (x, x) @require_flax class __lowerCAmelCase : """simple docstring""" def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple ) -> int: '''simple docstring''' pass def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' pass def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str: '''simple docstring''' _UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , 
lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = after_output[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple: '''simple docstring''' pt_model.to(lowerCAmelCase__ ) pt_model.eval() # prepare inputs _UpperCamelCase = inputs_dict _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple() _UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ ) _UpperCamelCase = 
fx_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ ) pt_model_loaded.to(lowerCAmelCase__ ) pt_model_loaded.eval() with torch.no_grad(): _UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 ) def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ ) _UpperCamelCase = fx_state self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params ) self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase__ ) @is_pt_flax_cross_test def snake_case__ ( self : int ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = config_inputs_dict.pop('''vision_config''' ) _UpperCamelCase = config_inputs_dict.pop('''text_config''' ) _UpperCamelCase = config_inputs_dict self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs() _UpperCamelCase = 
model_a(**lowerCAmelCase__ ) _UpperCamelCase = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model_a(**lowerCAmelCase__ ) _UpperCamelCase = after_outputs[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-5 ) @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxViTModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : str ) -> Tuple: '''simple docstring''' _UpperCamelCase = FlaxViTModelTester(self ) _UpperCamelCase = FlaxBertModelTester(self ) _UpperCamelCase = vit_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , ) _UpperCamelCase = 13 _UpperCamelCase = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _UpperCamelCase = random_attention_mask([batch_size, 4] ) _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ ) _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ ) return vision_model, text_model def snake_case__ ( self : List[str] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxCLIPVisionModelTester(self ) _UpperCamelCase = 
FlaxBertModelTester(self ) _UpperCamelCase = clip_model_tester.prepare_config_and_inputs() _UpperCamelCase = bert_model_tester.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _UpperCamelCase = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' ) _UpperCamelCase = model(**lowerCAmelCase__ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _UpperCamelCase = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
324
0
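# A recurring pattern in the dual-encoder tests above is comparing Flax and
# PyTorch outputs by their maximum absolute difference against a tolerance.
# A minimal, framework-agnostic sketch of that check; names here are
# illustrative, not taken from the test file:
import numpy as np

def assert_almost_equal(a: np.ndarray, b: np.ndarray, tol: float) -> None:
    """Raise if the max absolute difference between two arrays exceeds tol."""
    diff = np.abs(a - b).max()
    if diff >= tol:
        raise AssertionError(f"Difference between outputs is {diff} (>= {tol}).")

# The cross-framework checks above use a 4e-2 tolerance on the first outputs.
assert_almost_equal(np.array([1.0, 2.0]), np.array([1.0, 2.01]), tol=4e-2)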
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase ( A__ ): """simple docstring""" _a = 'xlnet' _a = ['mems'] _a = { 'n_token': 'vocab_size', # Backward compatibility 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , UpperCamelCase_=32000 , UpperCamelCase_=1024 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=4096 , UpperCamelCase_="gelu" , UpperCamelCase_=True , UpperCamelCase_="bi" , UpperCamelCase_=0.02 , UpperCamelCase_=1e-12 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=-1 , UpperCamelCase_=False , UpperCamelCase_="last" , UpperCamelCase_=True , UpperCamelCase_="tanh" , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = vocab_size UpperCamelCase__ :List[str] = d_model UpperCamelCase__ :Dict = n_layer UpperCamelCase__ :Any = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) UpperCamelCase__ :List[Any] = d_model // n_head UpperCamelCase__ :str = ff_activation UpperCamelCase__ :Optional[int] = d_inner UpperCamelCase__ :int = untie_r UpperCamelCase__ :Optional[int] = attn_type UpperCamelCase__ :Optional[int] = initializer_range UpperCamelCase__ :Optional[int] = layer_norm_eps UpperCamelCase__ :Tuple = dropout UpperCamelCase__ :Union[str, Any] = mem_len UpperCamelCase__ :Optional[int] = reuse_len UpperCamelCase__ :Optional[Any] = bi_data UpperCamelCase__ :Optional[Any] = clamp_len UpperCamelCase__ :Optional[Any] = same_length UpperCamelCase__ :List[str] = summary_type UpperCamelCase__ :Optional[Any] = summary_use_proj UpperCamelCase__ :Optional[Any] = summary_activation UpperCamelCase__ :int = summary_last_dropout UpperCamelCase__ :Dict = start_n_top UpperCamelCase__ :Union[str, Any] = end_n_top UpperCamelCase__ :Any = bos_token_id UpperCamelCase__ :int = pad_token_id UpperCamelCase__ :Optional[int] = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , UpperCamelCase_ , ) UpperCamelCase__ :Optional[Any] = kwargs['''use_cache'''] UpperCamelCase__ :List[Any] = use_mems_eval UpperCamelCase__ :Any = use_mems_train super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase__ ( self , UpperCamelCase_ ): '''simple docstring''' raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
97
'''simple docstring''' import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_attention_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_choices def snake_case__ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCamelCase = None if self.use_attention_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCamelCase = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def snake_case__ ( self : Union[str, Any] ) -> str: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Dict = ( ( 
FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' _UpperCamelCase = FlaxAlbertModelTester(self ) @slow def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' for model_class_name in self.all_model_classes: _UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) _UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) _UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0] _UpperCamelCase = (1, 11, 768) self.assertEqual(output.shape , lowerCAmelCase__ ) _UpperCamelCase = np.array( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
324
0
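# The XLNet configuration above validates that the head count divides the
# model width before deriving d_head. A minimal sketch of that validation
# pattern (MiniConfig is a hypothetical stand-in, not a transformers class):
class MiniConfig:
    def __init__(self, d_model: int = 1024, n_head: int = 16):
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_model // n_head  # width of a single attention head

cfg = MiniConfig()  # fine: 1024 is divisible by 16
# MiniConfig(d_model=1000, n_head=16) would raise ValueError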
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase__ : int = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : List[str] = ['GLPNFeatureExtractor'] lowerCAmelCase__ : Any = ['GLPNImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ : Optional[int] = [ 'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST', 'GLPNForDepthEstimation', 'GLPNLayer', 'GLPNModel', 'GLPNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys lowerCAmelCase__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
98
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std def snake_case__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Tuple = LevitImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = LevitImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' pass def 
snake_case__ ( self : Dict ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
324
0
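# The GLPN __init__ above defers framework-heavy imports through _LazyModule
# so importing the package stays cheap until a symbol is actually used. A rough
# sketch of the underlying idea via the standard PEP 562 module __getattr__
# hook -- an illustration of the pattern, not the transformers implementation;
# it would live in a package's __init__.py:
import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):
    # Import the owning submodule only when one of its names is first accessed.
    for module_name, exports in _import_structure.items():
        if name in exports:
            module = importlib.import_module(module_name)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")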
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return D = P_j - P_i for the first pair of pentagonal numbers whose
    sum and difference are both pentagonal (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
99
'''simple docstring''' import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home lowercase__ : Union[str, Any] = HUGGINGFACE_HUB_CACHE lowercase__ : int = 'config.json' lowercase__ : Optional[int] = 'diffusion_pytorch_model.bin' lowercase__ : List[str] = 'diffusion_flax_model.msgpack' lowercase__ : str = 'model.onnx' lowercase__ : Optional[int] = 'diffusion_pytorch_model.safetensors' lowercase__ : List[str] = 'weights.pb' lowercase__ : str = 'https://huggingface.co' lowercase__ : str = default_cache_path lowercase__ : Optional[int] = 'diffusers_modules' lowercase__ : Optional[int] = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules')) lowercase__ : Tuple = ['fp16', 'non-ema'] lowercase__ : int = '.self_attn'
324
0
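# The Project Euler solution above hinges on inverting P(n) = n(3n - 1) / 2:
# solving the quadratic for n gives n = (1 + sqrt(1 + 24x)) / 6, so x is
# pentagonal exactly when that expression is a positive integer. A quick
# self-contained sanity check of the inversion (is_pentagonal is redefined
# here so the snippet runs on its own):
def is_pentagonal(x: int) -> bool:
    root = (1 + 24 * x) ** 0.5
    return ((1 + root) / 6) % 1 == 0

pentagonals = [n * (3 * n - 1) // 2 for n in range(1, 10)]  # 1, 5, 12, 22, ...
assert all(is_pentagonal(p) for p in pentagonals)
assert not is_pentagonal(2) and not is_pentagonal(100)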
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=lowerCAmelCase__ , ) assert hasattr(self , """env""") def snake_case_ ( self , lowerCAmelCase__=1): # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=lowerCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase__ , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def snake_case_ ( self , lowerCAmelCase__): TrainingJobAnalytics(lowerCAmelCase__).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def snake_case_ ( self): # create estimator __SCREAMING_SNAKE_CASE = self.create_estimator() # run training estimator.fit() # result dataframe __SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping __SCREAMING_SNAKE_CASE = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCAmelCase__)
100
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : str = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def a__ ( lowercase : str ) -> Dict: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase = k.replace(lowercase, lowercase ) if k.startswith('''encoder''' ): _UpperCamelCase = k.replace('''.attn''', '''.self_attn''' ) _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''encoder_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm3''', '''final_layer_norm''' ) return k def a__ ( lowercase : List[str] ) -> List[Any]: """simple docstring""" _UpperCamelCase = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _UpperCamelCase = sd.pop(lowercase ) _UpperCamelCase = k.replace('''layernorm_embedding''', '''layer_norm''' ) assert new_k not in sd _UpperCamelCase = v lowercase__ : str = ['START'] @torch.no_grad() def a__ ( lowercase : Optional[int], lowercase : List[str], lowercase : List[str] ) -> Dict: """simple docstring""" _UpperCamelCase = torch.load(lowercase, map_location='''cpu''' ) _UpperCamelCase = model['''model'''] _UpperCamelCase = BlenderbotConfig.from_json_file(lowercase ) _UpperCamelCase = BlenderbotForConditionalGeneration(lowercase ) _UpperCamelCase = m.model.state_dict().keys() _UpperCamelCase = [] _UpperCamelCase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase = rename_state_dict_key(lowercase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowercase ) m.model.load_state_dict(lowercase, strict=lowercase ) m.half() m.save_pretrained(lowercase ) if __name__ == "__main__": lowercase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) lowercase__ : Optional[Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
324
0
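# The Blenderbot conversion script above renames ParlAI state-dict keys by
# applying ordered (old, new) substring substitutions. The renaming logic in
# isolation, with a few of the script's patterns (the sample key is made up):
PATTERNS = [
    ("attention", "attn"),
    ("q_lin", "q_proj"),
    ("norm_embeddings", "layernorm_embedding"),
]

def rename_key(key: str) -> str:
    for parlai_name, hf_name in PATTERNS:  # order matters: later pairs see rewritten keys
        key = key.replace(parlai_name, hf_name)
    return key

state_dict = {"encoder.attention.q_lin.weight": 0}
renamed = {rename_key(k): v for k, v in state_dict.items()}
assert "encoder.attn.q_proj.weight" in renamed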
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase ( unittest.TestCase ): def A__ ( self): # clean up the VRAM after each test super().tearDown() gc.collect() def A__ ( self): lowercase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''') lowercase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''') lowercase = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase , lowercase = FlaxStableDiffusionInpaintPipeline.from_pretrained(A__ ,safety_checker=A__) lowercase = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase = jax.random.PRNGKey(0) lowercase = 5_0 lowercase = jax.device_count() lowercase = num_samples * [prompt] lowercase = num_samples * [init_image] lowercase = num_samples * [mask_image] lowercase , lowercase , lowercase = pipeline.prepare_inputs(A__ ,A__ ,A__) # shard inputs and rng lowercase = replicate(A__) lowercase = jax.random.split(A__ ,jax.device_count()) lowercase = shard(A__) lowercase = shard(A__) lowercase = shard(A__) lowercase = pipeline( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,jit=A__) lowercase = output.images.reshape(A__ ,5_1_2 ,5_1_2 ,3) lowercase = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase = jnp.asarray(jax.device_get(image_slice.flatten())) lowercase = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]) print(f'output_slice: {output_slice}') assert jnp.abs(output_slice - expected_slice).max() < 1E-2
101
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : Tuple = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Tuple = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys lowercase__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
324
0
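# The Flax inpainting test above replicates weights and shards per-example
# inputs across devices before the pmapped pipeline call. Conceptually,
# flax's `shard` just adds a leading device axis; a numpy-only sketch of that
# reshape, assuming the batch divides evenly across devices:
import numpy as np

def shard(x: np.ndarray, num_devices: int) -> np.ndarray:
    """Reshape (batch, ...) into (num_devices, batch // num_devices, ...)."""
    assert x.shape[0] % num_devices == 0, "batch must divide evenly across devices"
    return x.reshape((num_devices, x.shape[0] // num_devices) + x.shape[1:])

batch = np.zeros((8, 512, 512, 3))     # e.g. 8 prepared images
sharded = shard(batch, num_devices=4)  # one slice of 2 images per device
assert sharded.shape == (4, 2, 512, 512, 3)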
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class _UpperCAmelCase : '''simple docstring''' def __init__(self , a_ , a_=14 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=4 , a_=4 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=0.02 , ): '''simple docstring''' __snake_case : Any = parent __snake_case : List[str] = batch_size __snake_case : Union[str, Any] = seq_length __snake_case : Tuple = is_training __snake_case : List[Any] = use_input_mask __snake_case : List[Any] = use_token_type_ids __snake_case : Union[str, Any] = use_labels __snake_case : Union[str, Any] = vocab_size __snake_case : Dict = hidden_size __snake_case : int = rotary_dim __snake_case : Any = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Union[str, Any] = hidden_act __snake_case : List[str] = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Optional[int] = max_position_embeddings __snake_case : Tuple = initializer_range __snake_case : int = None __snake_case : Optional[Any] = vocab_size - 1 __snake_case : List[str] = vocab_size - 1 __snake_case : Tuple = vocab_size - 1 def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : int = None if self.use_input_mask: __snake_case : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : int = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=a_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : int = config_and_inputs __snake_case : Tuple = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : Any = 20 __snake_case : int = model_class_name(a_ ) __snake_case : Tuple = model.init_cache(input_ids.shape[0] , a_ ) __snake_case : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) __snake_case : List[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __snake_case : Optional[Any] = model( input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , ) __snake_case : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) 
__snake_case : List[str] = model( input_ids[:, -1:] , attention_mask=a_ , past_key_values=outputs_cache.past_key_values , position_ids=a_ , ) __snake_case : Union[str, Any] = model(a_ ) __snake_case : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ ): '''simple docstring''' __snake_case : List[str] = 20 __snake_case : str = model_class_name(a_ ) __snake_case : Tuple = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __snake_case : Union[str, Any] = model.init_cache(input_ids.shape[0] , a_ ) __snake_case : Any = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __snake_case : List[str] = model( input_ids[:, :-1] , attention_mask=a_ , past_key_values=a_ , position_ids=a_ , ) __snake_case : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' ) __snake_case : Optional[int] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=a_ , position_ids=a_ , ) __snake_case : Optional[int] = model(a_ , attention_mask=a_ ) __snake_case : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" ) @require_flax class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowerCamelCase__ =(FlaxGPTJForCausalLM,) if is_flax_available() else () def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = FlaxGPTJModelTester(self ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(a_ , a_ , a_ , a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( a_ , a_ , a_ , a_ ) @tooslow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' ) __snake_case : str = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=a_ , truncation=a_ ) __snake_case : Optional[int] = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) __snake_case : Any = False __snake_case : List[str] = model.config.eos_token_id __snake_case : Dict = jax.jit(model.generate ) __snake_case : Dict = jit_generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences __snake_case : Union[str, Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) __snake_case : int = [ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. 
I\'m going to''', ] self.assertListEqual(a_ , a_ ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __snake_case : Tuple = self._prepare_for_class(a_ , a_ ) __snake_case : str = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __snake_case : int = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case : List[Any] = getattr(a_ , a_ ) __snake_case , __snake_case : Dict = pt_inputs['''input_ids'''].shape __snake_case : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(a_ ): __snake_case : List[str] = 0 __snake_case : Tuple = 1 __snake_case : int = 0 __snake_case : Optional[Any] = 1 __snake_case : Any = pt_model_class(a_ ).eval() __snake_case : int = model_class(a_ , dtype=jnp.floataa ) __snake_case : Dict = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a_ ) __snake_case : str = fx_state with torch.no_grad(): __snake_case : int = pt_model(**a_ ).to_tuple() __snake_case : str = fx_model(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(a_ , a_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(a_ ) __snake_case : List[str] = model_class.from_pretrained(a_ , from_pt=a_ ) __snake_case : int = fx_model_loaded(**a_ ).to_tuple() self.assertEqual( len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(a_ , a_ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __snake_case : List[Any] = self._prepare_for_class(a_ , a_ ) __snake_case : Dict = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __snake_case : str = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case : List[Any] = getattr(a_ , a_ ) __snake_case : List[str] = pt_model_class(a_ ).eval() __snake_case : int = model_class(a_ , dtype=jnp.floataa ) __snake_case : Dict = load_flax_weights_in_pytorch_model(a_ , fx_model.params ) __snake_case , __snake_case : Union[str, Any] = pt_inputs['''input_ids'''].shape __snake_case : Optional[Any] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(a_ ): __snake_case : Any = 0 __snake_case : int = 1 __snake_case : int = 0 __snake_case : int = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __snake_case : List[Any] = pt_model(**a_ ).to_tuple() __snake_case : List[str] = fx_model(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(a_ , a_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(a_ ) __snake_case : Any = 
pt_model_class.from_pretrained(a_ , from_flax=a_ ) with torch.no_grad(): __snake_case : Union[str, Any] = pt_model_loaded(**a_ ).to_tuple() self.assertEqual( len(a_ ) , len(a_ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(a_ , a_ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : List[str] = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) __snake_case : int = model(np.ones((1, 1) ) ) self.assertIsNotNone(a_ )
102
'''simple docstring''' import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging lowercase__ : Any = logging.get_logger(__name__) class __lowerCAmelCase : """simple docstring""" _snake_case : List[str] = None @experimental def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int: """simple docstring""" if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase ) def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]: """simple docstring""" _UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase ) _UpperCamelCase = [] # We organize the splits ourselve (contiguous splits) for index in range(lowercase ): _UpperCamelCase = len(lowercase ) // num_proc _UpperCamelCase = len(lowercase ) % num_proc _UpperCamelCase = div * index + min(lowercase, lowercase ) _UpperCamelCase = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( F"""Error dividing inputs iterable among processes. """ F"""Total number of objects {len(lowercase )}, """ F"""length: {sum(len(i[1] ) for i in split_kwds )}""" ) logger.info( F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" ) _UpperCamelCase , _UpperCamelCase = None, None if not disable_tqdm: _UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool: _UpperCamelCase = pool.map(lowercase, lowercase ) logger.info(F"""Finished {num_proc} processes""" ) _UpperCamelCase = [obj for proc_res in mapped for obj in proc_res] logger.info(F"""Unpacked {len(lowercase )} objects""" ) return mapped def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any: """simple docstring""" import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ): return joblib.Parallel()( joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def a__ ( lowercase : str ) -> Optional[int]: """simple docstring""" _UpperCamelCase = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: _UpperCamelCase = None
324
0
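# The multiprocessing helper above carves the iterable into num_proc
# contiguous slices, handing one extra item to each of the first
# `len(iterable) % num_proc` workers. The same div/mod arithmetic in
# isolation (contiguous_splits is an illustrative name):
def contiguous_splits(items: list, num_proc: int) -> list:
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits

assert contiguous_splits(list(range(10)), 3) == [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]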
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __snake_case ( unittest.TestCase ): _a = JukeboxTokenizer _a = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def UpperCAmelCase__ ( self : str): import torch lowerCAmelCase_ : List[Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') lowerCAmelCase_ : Any = tokenizer(**self.metas)['''input_ids'''] # fmt: off lowerCAmelCase_ : List[str] = [ torch.tensor([[ 0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7, 7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2, 4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5, 3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6, 4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8, 2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4, 4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1, 3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6, 4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9, 3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4, 4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 
4_5, 3_1, 7_6, 4_9, 4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6, 4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3, 7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6, 4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8, 2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0, 7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5, 7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4, 7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def UpperCAmelCase__ ( self : Any): import torch lowerCAmelCase_ : str = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') lowerCAmelCase_ : List[Any] = tokenizer(**self.metas)['''input_ids'''] # fmt: off lowerCAmelCase_ : Optional[int] = [ torch.tensor([[ 0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9, 3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1, 7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8, 2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1, 3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7, 7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5, 6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7, 3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1, 3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5, 3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4, 3_1, 2_7, 3_0, 7_9, 
7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2, 3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7, 1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2, 4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7, 4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1, 7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5, 2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
103
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=7 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Optional[Any]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Union[str, Any]=1 / 255 , lowerCAmelCase__ : Tuple=True , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case__ ( self : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=False ) -> str: '''simple docstring''' if not batched: _UpperCamelCase = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): _UpperCamelCase , _UpperCamelCase = image.size else: _UpperCamelCase , _UpperCamelCase = image.shape[1], image.shape[2] if w < h: _UpperCamelCase = int(self.size['''shortest_edge'''] * h / w ) _UpperCamelCase = self.size['''shortest_edge'''] elif w > h: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = int(self.size['''shortest_edge'''] * w / h ) else: _UpperCamelCase = self.size['''shortest_edge'''] _UpperCamelCase = self.size['''shortest_edge'''] else: _UpperCamelCase = [] for image in image_inputs: _UpperCamelCase , _UpperCamelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] _UpperCamelCase = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None def snake_case__ ( self : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = DeformableDetrImageProcessingTester(self ) @property def snake_case__ ( self : Optional[int] ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' 
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_rescale''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''do_pad''' ) ) self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) _UpperCamelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' pass def snake_case__ ( self : int ) -> Any: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : str ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values _UpperCamelCase , _UpperCamelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case__ ( self : int ) -> Tuple: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) _UpperCamelCase = {'''image_id''': 39769, '''annotations''': target} # encode them _UpperCamelCase = DeformableDetrImageProcessor() _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) ) @slow def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _UpperCamelCase = json.loads(f.read() ) 
_UpperCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} _UpperCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _UpperCamelCase = DeformableDetrImageProcessor(format='''coco_panoptic''' ) _UpperCamelCase = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors='''pt''' ) # verify pixel values _UpperCamelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowerCAmelCase__ , atol=1e-4 ) ) # verify area _UpperCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowerCAmelCase__ ) ) # verify boxes _UpperCamelCase = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowerCAmelCase__ ) _UpperCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowerCAmelCase__ , atol=1e-3 ) ) # verify image_id _UpperCamelCase = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowerCAmelCase__ ) ) # verify is_crowd _UpperCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowerCAmelCase__ ) ) # verify class_labels _UpperCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowerCAmelCase__ ) ) # verify masks _UpperCamelCase = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowerCAmelCase__ ) # verify orig_size _UpperCamelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowerCAmelCase__ ) ) # verify size _UpperCamelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowerCAmelCase__ ) )
324
0
"""Harris corner detection using the structure tensor."""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the Harris sensitivity parameter, conventionally 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img[y, x] = (0, 0, 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
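A minimal numpy-only sanity check of the response formula above (hypothetical usage; the synthetic image and window size are invented for illustration):

import numpy as np

# one bright quadrant gives a single corner at (4, 4)
img = np.zeros((9, 9), dtype=float)
img[4:, 4:] = 255.0

dy, dx = np.gradient(img)
ixx, iyy, ixy = dx**2, dy**2, dx * dy

y = x = 4
offset = 1  # window_size = 3 -> offset = 1
wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()

# r = det(M) - k * trace(M)^2; strongly positive at a corner
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
print(f"corner response at ({x}, {y}): {r:.1f}")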
104
"""Fast tokenizer for RemBERT."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"


class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
324
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a : List[str] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
105
"""Deformable DETR model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}


class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        max_position_embeddings=1024,
        encoder_layers=6,
        encoder_ffn_dim=1024,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        num_feature_levels=4,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=False,
        two_stage_num_proposals=300,
        with_box_refine=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=False,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
324
0
"""simple docstring""" import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : str ,lowercase_ : List[str] ,lowercase_ : Optional[int]=1_4 ,lowercase_ : Any=7 ,lowercase_ : List[Any]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Any=True ,lowercase_ : Optional[int]=True ,lowercase_ : Union[str, Any]=True ,lowercase_ : Union[str, Any]=9_9 ,lowercase_ : Any=3_2 ,lowercase_ : str=5 ,lowercase_ : int=4 ,lowercase_ : str=3_7 ,lowercase_ : Optional[Any]="gelu" ,lowercase_ : Any=0.1 ,lowercase_ : Any=0.1 ,lowercase_ : Tuple=5_1_2 ,lowercase_ : str=1_6 ,lowercase_ : str=2 ,lowercase_ : List[Any]=0.02 ,lowercase_ : int=3 ,lowercase_ : List[str]=4 ,lowercase_ : int=None ,): lowerCAmelCase__ : Union[str, Any] = parent lowerCAmelCase__ : Union[str, Any] = batch_size lowerCAmelCase__ : Optional[Any] = seq_length lowerCAmelCase__ : Any = is_training lowerCAmelCase__ : int = use_token_type_ids lowerCAmelCase__ : Optional[int] = use_input_mask lowerCAmelCase__ : List[str] = use_labels lowerCAmelCase__ : Optional[Any] = use_mc_token_ids lowerCAmelCase__ : List[str] = vocab_size lowerCAmelCase__ : int = hidden_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Tuple = num_attention_heads lowerCAmelCase__ : Tuple = intermediate_size lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : int = attention_probs_dropout_prob lowerCAmelCase__ : List[Any] = max_position_embeddings lowerCAmelCase__ : Optional[int] = type_vocab_size lowerCAmelCase__ : int = type_sequence_label_size lowerCAmelCase__ : List[str] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : Optional[int] = num_choices lowerCAmelCase__ : Dict = scope lowerCAmelCase__ : List[Any] = self.vocab_size - 1 def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : int = None if self.use_token_type_ids: lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowerCAmelCase__ : Optional[int] = None if self.use_mc_token_ids: lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length ) lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : int = None if self.use_labels: lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] ,self.num_choices ) lowerCAmelCase__ : Optional[int] = self.get_config() lowerCAmelCase__ : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 ) return ( config, input_ids, input_mask, 
head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def __lowerCAmelCase ( self : int ): return CTRLConfig( vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,) def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[int] ,lowercase_ : Dict ,lowercase_ : List[Any] ,lowercase_ : Any ,lowercase_ : str ,*lowercase_ : str ): lowerCAmelCase__ : Tuple = CTRLModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() model(lowercase_ ,token_type_ids=lowercase_ ,head_mask=lowercase_ ) model(lowercase_ ,token_type_ids=lowercase_ ) lowerCAmelCase__ : Tuple = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Dict ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : List[str] ,lowercase_ : Any ,*lowercase_ : Optional[Any] ): lowerCAmelCase__ : List[str] = CTRLLMHeadModel(lowercase_ ) model.to(lowercase_ ) model.eval() lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : int = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) : Tuple = config_and_inputs lowerCAmelCase__ : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask} return config, inputs_dict def __lowerCAmelCase ( self : Any ,lowercase_ : str ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Optional[int] ,*lowercase_ : Optional[Any] ): lowerCAmelCase__ : List[str] = self.num_labels lowerCAmelCase__ : Optional[Any] = CTRLForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : Dict = model(lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) @require_torch class SCREAMING_SNAKE_CASE ( a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" lowercase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () lowercase__ = (CTRLLMHeadModel,) if is_torch_available() else () lowercase__ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def __lowerCAmelCase ( self : Dict ,lowercase_ : Dict ,lowercase_ : Any ,lowercase_ : List[str] ,lowercase_ : str ,lowercase_ : Optional[Any] ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. 
return True return False def __lowerCAmelCase ( self : Optional[Any] ): lowerCAmelCase__ : Optional[Any] = CTRLModelTester(self ) lowerCAmelCase__ : Any = ConfigTester(self ,config_class=lowercase_ ,n_embd=3_7 ) def __lowerCAmelCase ( self : Optional[int] ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self : Dict ): self.config_tester.run_common_tests() def __lowerCAmelCase ( self : int ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*lowercase_ ) def __lowerCAmelCase ( self : Any ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self : List[str] ): pass @slow def __lowerCAmelCase ( self : Any ): for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : int = CTRLModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def __lowerCAmelCase ( self : Any ): pass @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : str ): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def __lowerCAmelCase ( self : Optional[int] ): lowerCAmelCase__ : List[str] = CTRLLMHeadModel.from_pretrained('''ctrl''' ) model.to(lowercase_ ) lowerCAmelCase__ : str = torch.tensor( [[1_1_8_5_9, 0, 1_6_1_1, 8]] ,dtype=torch.long ,device=lowercase_ ) # Legal the president is lowerCAmelCase__ : List[str] = [ 1_1_8_5_9, 0, 1_6_1_1, 8, 5, 1_5_0, 2_6_4_4_9, 2, 1_9, 3_4_8, 4_6_9, 3, 2_5_9_5, 4_8, 2_0_7_4_0, 2_4_6_5_3_3, 2_4_6_5_3_3, 1_9, 3_0, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a lowerCAmelCase__ : List[Any] = model.generate(lowercase_ ,do_sample=lowercase_ ) self.assertListEqual(output_ids[0].tolist() ,lowercase_ )
106
"""Break a Caesar cipher by minimising the chi-squared statistic over all shifts."""
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
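A hypothetical round trip for the solver above; the sample sentence is invented, and for English text of this length the statistic should recover shift 3:

ciphertext = "khoor zruog, wklv lv d whvw phvvdjh"  # "hello world, ..." shifted by 3
shift, chi_squared, plaintext = decrypt_caesar_with_chi_squared(ciphertext)
print(shift)      # expected: 3
print(plaintext)  # expected: "hello world, this is a test message"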
324
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
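Illustrative usage only (assumes transformers is installed and that RobertaOnnxConfig accepts a model config as its first argument, as OnnxConfig subclasses normally do):

config = RobertaConfig(vocab_size=1000, num_hidden_layers=2)
print(config.hidden_size)  # 768, the default

onnx_config = RobertaOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with dynamic batch/sequence axes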
107
"""Introsort: quicksort with a heapsort fallback and insertion sort for small runs."""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
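A quick property check for sort() against the built-in sorted() (an illustrative test, not part of the original module; assumes the functions above are importable):

import random

data = [random.randint(-100, 100) for _ in range(1000)]
assert sort(data.copy()) == sorted(data)
assert sort([]) == []
assert sort([5.0, 1.5, -3.2]) == [-3.2, 1.5, 5.0]
print("introsort agrees with sorted() on 1000 random integers")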
324
0
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = '''▁''' lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ): """simple docstring""" a : str =BigBirdTokenizer a : Union[str, Any] =BigBirdTokenizerFast a : Tuple =True a : Any =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCAmelCase : str = self.tokenizer_class(snake_case__ , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : str = "<s>" lowerCAmelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "[MASK]" ) self.assertEqual(len(snake_case__ ) , 1_004 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def lowercase__ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase : Tuple = self.get_tokenizer() lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() lowerCAmelCase : Tuple = "I was born in 92000, and this is falsé." lowerCAmelCase : Optional[int] = tokenizer.tokenize(snake_case__ ) lowerCAmelCase : int = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) lowerCAmelCase : int = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) lowerCAmelCase : List[Any] = self.get_rust_tokenizer() lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ ) lowerCAmelCase : List[Any] = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = BigBirdTokenizer(snake_case__ , keep_accents=snake_case__ ) lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [285, 46, 10, 170, 382] , ) lowerCAmelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def lowercase__ ( self ): """simple docstring""" return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[Any] = "Hello World!" lowerCAmelCase : Any = [65, 18_536, 2_260, 101, 66] self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Union[str, Any] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) # fmt: off lowerCAmelCase : List[str] = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) ) @require_torch @slow def lowercase__ ( self ): """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowerCAmelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase : int = " ".join(snake_case__ ) lowerCAmelCase : Dict = self.big_tokenizer.encode_plus(snake_case__ , return_tensors="pt" , return_token_type_ids=snake_case__ ) lowerCAmelCase : Any = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=snake_case__ ) lowerCAmelCase : str = BigBirdConfig(attention_type="original_full" ) lowerCAmelCase : Any = BigBirdModel(snake_case__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**snake_case__ ) model(**snake_case__ ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : List[str] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" ) lowerCAmelCase : Union[str, Any] = tokenizer.decode(tokenizer("Paris is the [MASK]." 
).input_ids ) self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" ) @slow def lowercase__ ( self ): """simple docstring""" lowerCAmelCase : Any = {"input_ids": [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
108
"""Remove duplicate initializer tensors from an ONNX model to shrink its file size."""
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    # Compare two TensorProtos while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(fp):
    model_file_folder = os.path.dirname(fp)
    model_file_name = os.path.basename(fp)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
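The numeric data_type codes branched on in the byte-size estimate above are onnx.TensorProto enum values; a small sketch spelling out the mapping (my reading of the constants, added for clarity, not part of the original script):

# Illustrative only: the dtype codes used above, expressed via the
# onnx.TensorProto enum; byte widths match the if/elif chain.
from onnx import TensorProto

BYTES_PER_ELEMENT = {
    TensorProto.FLOAT: 4,   # data_type == 1
    TensorProto.INT32: 4,   # data_type == 6
    TensorProto.INT64: 8,   # data_type == 7
    TensorProto.DOUBLE: 8,  # data_type == 11
}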
324
0
"""simple docstring""" def _snake_case ( UpperCamelCase : int ): if num <= 0: raise ValueError("""Input must be a positive integer""" ) UpperCAmelCase : Any = [True] * (num + 1) UpperCAmelCase : Any = 2 while p * p <= num: if primes[p]: for i in range(p * p , num + 1 , UpperCamelCase ): UpperCAmelCase : int = False p += 1 return [prime for prime in range(2 , num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() A: int = int(input("Enter a positive integer: ").strip()) print(prime_sieve_eratosthenes(user_num))
109
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right lowercase__ : List[Any] = 25_00_04 lowercase__ : str = 25_00_20 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : Optional[Any] = MBartTokenizer _snake_case : Tuple = MBartTokenizerFast _snake_case : List[str] = True _snake_case : Optional[Any] = True def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : str ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) _UpperCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCamelCase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return _UpperCamelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = 
tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) _UpperCamelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=True _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) # Save tokenizer rust, legacy_format=False _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.save_pretrained(lowerCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _UpperCamelCase = tokenizer_r.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = tokenizer_p.from_pretrained(lowerCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) shutil.rmtree(lowerCAmelCase__ ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _snake_case : Dict = 'facebook/mbart-large-en-ro' _snake_case : Dict = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] _snake_case : List[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] _snake_case : Union[str, Any] = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod 
def snake_case__ ( cls : List[str] ) -> List[str]: '''simple docstring''' _UpperCamelCase = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) _UpperCamelCase = 1 return cls def snake_case__ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 ) def snake_case__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def snake_case__ ( self : str ) -> List[Any]: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) _UpperCamelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] _UpperCamelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) _UpperCamelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ['''this is gunna be a long sentence ''' * 20] assert isinstance(src_text[0] , lowerCAmelCase__ ) _UpperCamelCase = 10 _UpperCamelCase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> int: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250026, 250001] ) def snake_case__ ( self : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = MBartTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ ) @require_torch def snake_case__ ( self : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def snake_case__ ( self : Optional[Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _UpperCamelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) _UpperCamelCase = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , 
lowerCAmelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def snake_case__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' ) _UpperCamelCase = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' ) _UpperCamelCase = targets['''input_ids'''] _UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def snake_case__ ( self : Tuple ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[62, 3034, 2, 250004]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 250001, } , )
324
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers A = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def __A ( ) -> Dict: __a : int = os.path.dirname(os.path.realpath(a_)) __a : Union[str, Any] = os.path.join(a_ , '''words.txt''') __a : List[str] = '''''' with open(a_) as f: __a : List[Any] = f.readline() __a : Any = [word.strip('''"''') for word in words.strip('''\r\n''').split(''',''')] __a : List[str] = [ word for word in [sum(ord(a_) - 64 for x in word) for word in words] if word in TRIANGULAR_NUMBERS ] return len(a_) if __name__ == "__main__": print(solution())
160
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch lowercase__ : str = logging.get_logger(__name__) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Union[str, Any] = ['pixel_values'] def __init__( self : Optional[Any] , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Dict[str, int]] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Union[int, float] = 1 / 255 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> None: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 256} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def snake_case__ ( self : Tuple , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _UpperCamelCase = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ ) return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(lowerCAmelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Dict , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Tuple ) -> np.ndarray: '''simple docstring''' return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Union[float, List[float]] , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Any , ) -> np.ndarray: '''simple docstring''' return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[float] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[float, List[float]]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ : Optional[Any] , ) -> Any: '''simple docstring''' _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ ) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(lowerCAmelCase__ , param_name='''crop_size''' ) _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(lowerCAmelCase__ ) if not valid_images(lowerCAmelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
_UpperCamelCase = [to_numpy_array(lowerCAmelCase__ ) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Tuple] = None ) -> List[str]: '''simple docstring''' _UpperCamelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCAmelCase__ ): _UpperCamelCase = target_sizes.numpy() _UpperCamelCase = [] for idx in range(len(lowerCAmelCase__ ) ): _UpperCamelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCAmelCase__ ) _UpperCamelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCAmelCase__ ) else: _UpperCamelCase = logits.argmax(dim=1 ) _UpperCamelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
324
0
"""simple docstring""" import csv import tweepy # Twitter API credentials __a = '' __a = '' __a = '' __a = '' def A_ ( _lowercase ): '''simple docstring''' snake_case_ :List[Any] = tweepy.OAuthHandler(_lowercase, _lowercase ) auth.set_access_token(_lowercase, _lowercase ) snake_case_ :List[Any] = tweepy.API(_lowercase ) # initialize a list to hold all the tweepy Tweets snake_case_ :Dict = [] # make initial request for most recent tweets (200 is the maximum allowed count) snake_case_ :Tuple = api.user_timeline(screen_name=_lowercase, count=200 ) # save most recent tweets alltweets.extend(_lowercase ) # save the id of the oldest tweet less one snake_case_ :List[str] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(_lowercase ) > 0: print(f"""getting tweets before {oldest}""" ) # all subsequent requests use the max_id param to prevent duplicates snake_case_ :Dict = api.user_timeline( screen_name=_lowercase, count=200, max_id=_lowercase ) # save most recent tweets alltweets.extend(_lowercase ) # update the id of the oldest tweet less one snake_case_ :Optional[Any] = alltweets[-1].id - 1 print(f"""...{len(_lowercase )} tweets downloaded so far""" ) # transform the tweepy tweets into a 2D array that will populate the csv snake_case_ :Union[str, Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f"""new_{screen_name}_tweets.csv""", """w""" ) as f: snake_case_ :int = csv.writer(_lowercase ) writer.writerow(["""id""", """created_at""", """text"""] ) writer.writerows(_lowercase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets("FirePing32")
66
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : jnp.ndarray @flax_register_to_config class __lowerCAmelCase ( nn.Module , __magic_name__ , __magic_name__ ): """simple docstring""" _snake_case : int = 3_2 _snake_case : int = 4 _snake_case : int = 4 _snake_case : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _snake_case : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") _snake_case : Union[bool, Tuple[bool]] = False _snake_case : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) _snake_case : int = 2 _snake_case : Union[int, Tuple[int]] = 8 _snake_case : Optional[Union[int, Tuple[int]]] = None _snake_case : int = 1_2_8_0 _snake_case : float = 0.0 _snake_case : bool = False _snake_case : jnp.dtype = jnp.floataa _snake_case : bool = True _snake_case : int = 0 _snake_case : bool = False def snake_case__ ( self : List[Any] , lowerCAmelCase__ : jax.random.KeyArray ) -> FrozenDict: '''simple docstring''' _UpperCamelCase = (1, self.in_channels, self.sample_size, self.sample_size) _UpperCamelCase = jnp.zeros(lowerCAmelCase__ , dtype=jnp.floataa ) _UpperCamelCase = jnp.ones((1,) , dtype=jnp.intaa ) _UpperCamelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _UpperCamelCase , _UpperCamelCase = jax.random.split(lowerCAmelCase__ ) _UpperCamelCase = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["params"] def snake_case__ ( self : List[Any] ) -> Any: '''simple docstring''' _UpperCamelCase = self.block_out_channels _UpperCamelCase = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
_UpperCamelCase = self.num_attention_heads or self.attention_head_dim # input _UpperCamelCase = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _UpperCamelCase = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _UpperCamelCase = FlaxTimestepEmbedding(lowerCAmelCase__ , dtype=self.dtype ) _UpperCamelCase = self.only_cross_attention if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = (num_attention_heads,) * len(self.down_block_types ) # down _UpperCamelCase = [] _UpperCamelCase = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = block_out_channels[i] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _UpperCamelCase = FlaxCrossAttnDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxDownBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = down_blocks # mid _UpperCamelCase = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up _UpperCamelCase = [] _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = list(reversed(lowerCAmelCase__ ) ) _UpperCamelCase = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): _UpperCamelCase = output_channel _UpperCamelCase = reversed_block_out_channels[i] _UpperCamelCase = reversed_block_out_channels[min(i + 1 , len(lowerCAmelCase__ ) - 1 )] _UpperCamelCase = i == len(lowerCAmelCase__ ) - 1 if up_block_type == "CrossAttnUpBlock2D": _UpperCamelCase = FlaxCrossAttnUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: _UpperCamelCase = FlaxUpBlockaD( in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(lowerCAmelCase__ ) _UpperCamelCase = output_channel _UpperCamelCase = up_blocks # out _UpperCamelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) _UpperCamelCase = nn.Conv( self.out_channels , 
kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , jnp.ndarray ): _UpperCamelCase = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(lowerCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _UpperCamelCase = timesteps.astype(dtype=jnp.floataa ) _UpperCamelCase = jnp.expand_dims(lowerCAmelCase__ , 0 ) _UpperCamelCase = self.time_proj(lowerCAmelCase__ ) _UpperCamelCase = self.time_embedding(lowerCAmelCase__ ) # 2. pre-process _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) _UpperCamelCase = self.conv_in(lowerCAmelCase__ ) # 3. down _UpperCamelCase = (sample,) for down_block in self.down_blocks: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) else: _UpperCamelCase , _UpperCamelCase = down_block(lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: _UpperCamelCase = () for down_block_res_sample, down_block_additional_residual in zip( lowerCAmelCase__ , lowerCAmelCase__ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) _UpperCamelCase = new_down_block_res_samples # 4. mid _UpperCamelCase = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: _UpperCamelCase = down_block_res_samples[-(self.layers_per_block + 1) :] _UpperCamelCase = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = up_block( lowerCAmelCase__ , temb=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train , ) else: _UpperCamelCase = up_block(lowerCAmelCase__ , temb=lowerCAmelCase__ , res_hidden_states_tuple=lowerCAmelCase__ , deterministic=not train ) # 6. post-process _UpperCamelCase = self.conv_norm_out(lowerCAmelCase__ ) _UpperCamelCase = nn.silu(lowerCAmelCase__ ) _UpperCamelCase = self.conv_out(lowerCAmelCase__ ) _UpperCamelCase = jnp.transpose(lowerCAmelCase__ , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowerCAmelCase__ )
324
0
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
336
'''simple docstring''' import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase__ : List[str] = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ 'text-classification', 'language-modeling', 'summarization', 'token-classification', 'question-answering', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase__ : Dict = logging.getLogger() def a__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Tuple, lowercase : Dict="eval" ) -> int: """simple docstring""" _UpperCamelCase = os.path.join(lowercase, F"""{split}_results.json""" ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: return json.load(lowercase ) raise ValueError(F"""can't find {path}""" ) lowercase__ : int = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def snake_case__ ( self : Any ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_glue.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) @slow def snake_case__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_clm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def snake_case__ ( self : Tuple ) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_summarization_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) 
self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def snake_case__ ( self : str ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_ta_mlm_flax.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 ) @slow def snake_case__ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_flax_ner.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ): run_qa.main() _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
324
0
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a__ ( SCREAMING_SNAKE_CASE : dict ): '''simple docstring''' return (data["data"], data["target"]) def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' lowerCAmelCase : int = XGBRegressor(verbosity=0 , random_state=4_2 ) xgb.fit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Predict target for test data lowerCAmelCase : Union[str, Any] = xgb.predict(SCREAMING_SNAKE_CASE ) lowerCAmelCase : Optional[Any] = predictions.reshape(len(SCREAMING_SNAKE_CASE ) , 1 ) return predictions def a__ ( ): '''simple docstring''' lowerCAmelCase : Tuple = fetch_california_housing() lowerCAmelCase , lowerCAmelCase : Union[str, Any] = data_handling(SCREAMING_SNAKE_CASE ) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = train_test_split( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , test_size=0.25 , random_state=1 ) lowerCAmelCase : List[Any] = xgboost(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" ) print(f"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
108
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowercase__ : Optional[Any] = logging.getLogger() def a__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def a__ ( lowercase : Dict ) -> int: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = os.path.join(lowercase, '''all_results.json''' ) if os.path.exists(lowercase ): with open(lowercase, '''r''' ) as f: _UpperCamelCase = json.load(lowercase ) else: raise ValueError(F"""can't find {path}""" ) return results def a__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowercase__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @classmethod def snake_case__ ( cls : Optional[int] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__ ( cls : Tuple ) -> int: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Any ) -> Dict: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : int ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : List[str] ) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : str ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) ) @slow def snake_case__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCAmelCase__ 
) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) _UpperCamelCase = get_results(lowerCAmelCase__ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
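# The tests above read metrics through a `get_results` helper defined earlier
# in this test module. A minimal sketch of what such a helper does (an
# assumption for illustration, not the verbatim upstream code) is to load the
# `all_results.json` file that the no_trainer example scripts write into their
# output directory:
import json
import os


def get_results(output_dir):
    # hypothetical reimplementation: the example scripts dump their final
    # metrics to <output_dir>/all_results.json
    path = os.path.join(output_dir, "all_results.json")
    if not os.path.exists(path):
        raise ValueError(f"can't find {path}")
    with open(path) as f:
        return json.load(f)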
"""simple docstring""" from __future__ import annotations import bisect def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : int = 0 ,_lowerCamelCase : int = -1 ) -> int: if hi < 0: _lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) while lo < hi: _lowerCAmelCase : Dict = lo + (hi - lo) // 2 if sorted_collection[mid] < item: _lowerCAmelCase : Union[str, Any] = mid + 1 else: _lowerCAmelCase : Dict = mid return lo def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : int = 0 ,_lowerCamelCase : int = -1 ) -> int: if hi < 0: _lowerCAmelCase : Union[str, Any] = len(_lowerCamelCase ) while lo < hi: _lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: _lowerCAmelCase : str = mid + 1 else: _lowerCAmelCase : List[Any] = mid return lo def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : int = 0 ,_lowerCamelCase : int = -1 ) -> None: sorted_collection.insert(bisect_left(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : int = 0 ,_lowerCamelCase : int = -1 ) -> None: sorted_collection.insert(bisect_right(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase ) def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> int | None: _lowerCAmelCase : Optional[Any] = 0 _lowerCAmelCase : Tuple = len(_lowerCamelCase ) - 1 while left <= right: _lowerCAmelCase : Tuple = left + (right - left) // 2 _lowerCAmelCase : Optional[Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: _lowerCAmelCase : Dict = midpoint - 1 else: _lowerCAmelCase : List[str] = midpoint + 1 return None def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ) -> int | None: _lowerCAmelCase : str = bisect.bisect_left(_lowerCamelCase ,_lowerCamelCase ) if index != len(_lowerCamelCase ) and sorted_collection[index] == item: return index return None def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ,_lowerCamelCase : int ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> int | None: if right < left: return None _lowerCAmelCase : Union[str, Any] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,midpoint - 1 ) else: return binary_search_by_recursion(_lowerCamelCase ,_lowerCamelCase ,midpoint + 1 ,_lowerCamelCase ) if __name__ == "__main__": _a : Union[str, Any] = input('Enter numbers separated by comma:\n').strip() _a : List[Any] = sorted(int(item) for item in user_input.split(',')) _a : List[Any] = int(input('Enter a single number to be found in the list:\n')) _a : List[str] = binary_search(collection, target) if result is None: print(F"""{target} was not found in {collection}.""") else: print(F"""{target} was found at position {result} in {collection}.""")
"""Playfair cipher: https://en.wikipedia.org/wiki/Playfair_cipher"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext by up-casing it and separating repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean


def generate_table(key: str) -> list[str]:
    # I and J share a cell, so the alphabet omits J (25 letters for a 5x5 table)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
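# A short round-trip check for the cipher above; the key and message are
# illustrative. decode(encode(p)) recovers prepare_input(p) rather than p:
# non-letters are dropped, J folds into I, and X's pad doubled letters.
message = "Hide the gold in the tree stump"
secret = encode(message, "playfair example")
assert decode(secret, "playfair example") == prepare_input(message)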
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # lock file names longer than the usual 255-character OS limit are shortened
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
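# A minimal usage sketch of the locking pattern the tests above exercise: a
# second acquirer either waits for the lock or raises Timeout. The lock path
# is illustrative.
from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/my_resource.lock")
try:
    with lock.acquire(timeout=5):
        pass  # critical section: the resource is exclusively ours here
except Timeout:
    print("another process is holding the lock")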
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Any = {'vocab_file': 'spiece.model'} lowercase__ : Dict = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } lowercase__ : Optional[Any] = { 'google/bigbird-roberta-base': 40_96, 'google/bigbird-roberta-large': 40_96, 'google/bigbird-base-trivia-itc': 40_96, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : str = ['input_ids', 'attention_mask'] _snake_case : List[int] = [] def __init__( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int="<unk>" , lowerCAmelCase__ : Union[str, Any]="<s>" , lowerCAmelCase__ : str="</s>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Dict="[SEP]" , lowerCAmelCase__ : str="[MASK]" , lowerCAmelCase__ : Optional[Any]="[CLS]" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : int , ) -> None: '''simple docstring''' _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCamelCase = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , ) _UpperCamelCase = vocab_file _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase__ ) @property def snake_case__ ( self : List[str] ) -> Tuple: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Any ) -> int: '''simple docstring''' _UpperCamelCase = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self : str , lowerCAmelCase__ : Tuple ) -> List[Any]: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCamelCase = {} _UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : str , lowerCAmelCase__ : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> List[Any]: '''simple docstring''' return self.sp_model.piece_to_id(lowerCAmelCase__ ) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.sp_model.IdToPiece(lowerCAmelCase__ ) return token def snake_case__ ( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' _UpperCamelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCAmelCase__ ) + token _UpperCamelCase = True _UpperCamelCase = [] else: current_sub_tokens.append(lowerCAmelCase__ ) _UpperCamelCase = False out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ) -> str: '''simple docstring''' _UpperCamelCase = kwargs.pop('''use_source_tokenizer''' , lowerCAmelCase__ ) _UpperCamelCase = self.convert_ids_to_tokens(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase = [] _UpperCamelCase = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) _UpperCamelCase = [] sub_texts.append(lowerCAmelCase__ ) else: current_sub_text.append(lowerCAmelCase__ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase__ ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: _UpperCamelCase = re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(lowerCAmelCase__ ) ) else: _UpperCamelCase = ''''''.join(lowerCAmelCase__ ) _UpperCamelCase = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase = self.clean_up_tokenization(lowerCAmelCase__ ) return clean_text else: return text def snake_case__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowerCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCamelCase = os.path.join( lowerCAmelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase__ , '''wb''' ) as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (out_vocab_file,) def snake_case__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] _UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCAmelCase__ )) + [1] return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1] def snake_case__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
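# A brief usage sketch for the tokenizer class above (BigBirdTokenizer in the
# upstream transformers source), loading the published checkpoint; network
# access assumed.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Paris is the [MASK] of France.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))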
import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A__ ( unittest.TestCase ): """simple docstring""" @property def a_ ( self ): torch.manual_seed(0 ) snake_case = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def a_ ( self ): snake_case = self.dummy_uncond_unet snake_case = KarrasVeScheduler() snake_case = KarrasVePipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case = torch.manual_seed(0 ) snake_case = pipe(num_inference_steps=2 , generator=lowerCAmelCase__ , output_type='''numpy''' ).images snake_case = torch.manual_seed(0 ) snake_case = pipe(num_inference_steps=2 , generator=lowerCAmelCase__ , output_type='''numpy''' , return_dict=lowerCAmelCase__ )[0] snake_case = image[0, -3:, -3:, -1] snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) snake_case = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A__ ( unittest.TestCase ): """simple docstring""" def a_ ( self ): snake_case = '''google/ncsnpp-celebahq-256''' snake_case = UNetaDModel.from_pretrained(lowerCAmelCase__ ) snake_case = KarrasVeScheduler() snake_case = KarrasVePipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case = torch.manual_seed(0 ) snake_case = pipe(num_inference_steps=2_0 , generator=lowerCAmelCase__ , output_type='''numpy''' ).images snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) snake_case = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
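# A minimal end-to-end sketch of the pipeline exercised by the slow test
# above; the checkpoint id comes from that test, the step count is
# illustrative.
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
image = pipe(num_inference_steps=20, generator=torch.manual_seed(0)).images[0]
image.save("karras_ve_sample.png")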
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
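# A quick sketch of building a randomly initialized model from the config
# above (ASTConfig/ASTModel are the upstream transformers names).
from transformers import ASTConfig, ASTModel

config = ASTConfig(num_mel_bins=128, max_length=1024)
model = ASTModel(config)
print(model.config.hidden_size)  # 768 by default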
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np _UpperCamelCase : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 _UpperCamelCase : Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007 def a_ ( _lowerCAmelCase : Vector , _lowerCAmelCase : Vector ): '''simple docstring''' return np.sqrt(np.sum((np.asarray(_lowerCAmelCase ) - np.asarray(_lowerCAmelCase )) ** 2 ) ) def a_ ( _lowerCAmelCase : Vector , _lowerCAmelCase : Vector ): '''simple docstring''' return sum((va - va) ** 2 for va, va in zip(_lowerCAmelCase , _lowerCAmelCase ) ) ** (1 / 2) if __name__ == "__main__": def a_ ( ): '''simple docstring''' from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) ) benchmark()
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import ( BackboneOutput, BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from ...utils.backbone_utils import BackboneMixin from .configuration_resnet import ResNetConfig lowercase__ : Union[str, Any] = logging.get_logger(__name__) # General docstring lowercase__ : Dict = 'ResNetConfig' # Base docstring lowercase__ : str = 'microsoft/resnet-50' lowercase__ : Tuple = [1, 20_48, 7, 7] # Image classification docstring lowercase__ : Optional[Any] = 'microsoft/resnet-50' lowercase__ : List[str] = 'tiger cat' lowercase__ : List[Any] = [ 'microsoft/resnet-50', # See all resnet models at https://huggingface.co/models?filter=resnet ] class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad( lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetConvLayer( config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act ) _UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 ) _UpperCamelCase = config.num_channels def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.pooler(lowerCAmelCase__ ) return embedding class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ ) _UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ ) def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = self.convolution(lowerCAmelCase__ ) _UpperCamelCase = self.normalization(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( 
nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = out_channels // reduction _UpperCamelCase = ( ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , ) _UpperCamelCase = ACTaFN[activation] def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]: '''simple docstring''' _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowerCAmelCase__ ) _UpperCamelCase = self.shortcut(lowerCAmelCase__ ) hidden_state += residual _UpperCamelCase = self.activation(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int: '''simple docstring''' super().__init__() _UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor: '''simple docstring''' _UpperCamelCase = input for layer in self.layers: _UpperCamelCase = layer(lowerCAmelCase__ ) return hidden_state class __lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]: '''simple docstring''' super().__init__() _UpperCamelCase = nn.ModuleList([] ) # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input self.stages.append( ResNetStage( lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if 
config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ): self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) _UpperCamelCase = stage_module(lowerCAmelCase__ ) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention( last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : Optional[int] = ResNetConfig _snake_case : Union[str, Any] = 'resnet' _snake_case : Optional[int] = 'pixel_values' _snake_case : int = True def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]: '''simple docstring''' if isinstance(lowerCAmelCase__ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]: '''simple docstring''' if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCamelCase = value lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( 'The bare ResNet model outputting raw features without any specific head on top.' 
, __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: '''simple docstring''' _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder( lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowerCAmelCase__ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' super().__init__(lowerCAmelCase__ ) _UpperCamelCase = config.num_labels _UpperCamelCase = ResNetModel(lowerCAmelCase__ ) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: '''simple docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowerCAmelCase__ ) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = '''single_label_classification''' else: _UpperCamelCase = '''multi_label_classification''' if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() ) else: _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ ) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states ) @add_start_docstrings( '\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , ) class __lowerCAmelCase ( __magic_name__ , __magic_name__ ): """simple docstring""" def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) super()._init_backbone(lowerCAmelCase__ ) _UpperCamelCase = [config.embedding_size] + config.hidden_sizes _UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ ) _UpperCamelCase = ResNetEncoder(lowerCAmelCase__ ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) @replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput: '''simple 
docstring''' _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = self.embedder(lowerCAmelCase__ ) _UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ ) _UpperCamelCase = outputs.hidden_states _UpperCamelCase = () for idx, stage in enumerate(self.stage_names ): if stage in self.out_features: feature_maps += (hidden_states[idx],) if not return_dict: _UpperCamelCase = (feature_maps,) if output_hidden_states: output += (outputs.hidden_states,) return output return BackboneOutput( feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
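# An inference sketch for the model classes defined above, using the public
# microsoft/resnet-50 checkpoint named in the docstrings (network access
# assumed; the blank test image is a stand-in for a real photo).
import torch
from PIL import Image
from transformers import AutoImageProcessor, ResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
inputs = processor(Image.new("RGB", (224, 224)), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])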
"""simple docstring""" import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCAmelCase_ = { 'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt', 'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt', 'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt', 'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt', 'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt', 'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt', 'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt', 'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt', 'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt', 'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt', } def __UpperCAmelCase ( __lowerCamelCase ) -> Any: lowercase__ : Union[str, Any] = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(__lowerCamelCase , __lowerCamelCase ) lowerCAmelCase_ = { 'blocks': 'layers', 'mlp.0': 'fc1', 'mlp.2': 'fc2', 'mlp_ln': 'final_layer_norm', '.attn.query': '.self_attn.q_proj', '.attn.key': '.self_attn.k_proj', '.attn.value': '.self_attn.v_proj', '.attn_ln': '.self_attn_layer_norm', '.attn.out': '.self_attn.out_proj', '.cross_attn.query': '.encoder_attn.q_proj', '.cross_attn.key': '.encoder_attn.k_proj', '.cross_attn.value': '.encoder_attn.v_proj', '.cross_attn_ln': '.encoder_attn_layer_norm', '.cross_attn.out': '.encoder_attn.out_proj', 'decoder.ln.': 'decoder.layer_norm.', 'encoder.ln.': 'encoder.layer_norm.', 'token_embedding': 'embed_tokens', 'encoder.positional_embedding': 'encoder.embed_positions.weight', 'decoder.positional_embedding': 'decoder.embed_positions.weight', 'ln_post': 'layer_norm', } def __UpperCAmelCase ( __lowerCamelCase ) -> str: lowercase__ : Optional[Any] = list(s_dict.keys() ) for key in keys: lowercase__ : List[Any] = key for k, v in WHISPER_MAPPING.items(): if k in key: lowercase__ : Tuple = new_key.replace(__lowerCamelCase , __lowerCamelCase ) print(f"""{key} -> {new_key}""" ) lowercase__ : str = s_dict.pop(__lowerCamelCase ) return s_dict def __UpperCAmelCase ( __lowerCamelCase ) -> List[str]: lowercase__ , lowercase__ : List[str] = emb.weight.shape lowercase__ : Tuple = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase ) lowercase__ : str = emb.weight.data return lin_layer def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> bytes: os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase ) lowercase__ : List[str] = os.path.basename(__lowerCamelCase ) lowercase__ : Dict = url.split('''/''' )[-2] lowercase__ : Tuple = 
os.path.join(__lowerCamelCase , __lowerCamelCase ) if os.path.exists(__lowerCamelCase ) and not os.path.isfile(__lowerCamelCase ): raise RuntimeError(f"""{download_target} exists and is not a regular file""" ) if os.path.isfile(__lowerCamelCase ): lowercase__ : Optional[int] = open(__lowerCamelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCamelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(__lowerCamelCase ) as source, open(__lowerCamelCase , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=__lowerCamelCase , unit_divisor=10_24 ) as loop: while True: lowercase__ : Tuple = source.read(81_92 ) if not buffer: break output.write(__lowerCamelCase ) loop.update(len(__lowerCamelCase ) ) lowercase__ : Any = open(__lowerCamelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCamelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Dict: if ".pt" not in checkpoint_path: lowercase__ : Tuple = _download(_MODELS[checkpoint_path] ) else: lowercase__ : Optional[int] = torch.load(__lowerCamelCase , map_location='''cpu''' ) lowercase__ : Any = original_checkpoint['''dims'''] lowercase__ : Optional[Any] = original_checkpoint['''model_state_dict'''] lowercase__ : Union[str, Any] = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(__lowerCamelCase ) rename_keys(__lowerCamelCase ) lowercase__ : str = True lowercase__ : Tuple = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowercase__ : List[str] = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__lowerCamelCase , decoder_ffn_dim=__lowerCamelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) lowercase__ : Optional[int] = WhisperForConditionalGeneration(__lowerCamelCase ) lowercase__ , lowercase__ : str = model.model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase ) if len(__lowerCamelCase ) > 0 and not set(__lowerCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f""" but all the following weights are missing {missing}""" ) if tie_embeds: lowercase__ : Dict = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowercase__ : Optional[Any] = proj_out_weights model.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') lowerCAmelCase_ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
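# An illustrative invocation of the conversion script above; the script path
# is a placeholder for wherever this module is saved. Per the _MODELS table,
# --checkpoint_path may also be a model name such as "tiny", in which case
# the original OpenAI weights are downloaded and checksum-verified first.
#
#   python path/to/this_script.py \
#       --checkpoint_path tiny \
#       --pytorch_dump_folder_path ./whisper-tiny-hf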
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def a__ ( lowercase : Union[str, Any] ) -> Tuple: """simple docstring""" if isinstance(lowercase, collections.abc.Iterable ): return x return (x, x) @require_flax class __lowerCAmelCase : """simple docstring""" def snake_case__ ( self : Any , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple ) -> int: '''simple docstring''' pass def snake_case__ ( self : Any ) -> Optional[int]: '''simple docstring''' pass def snake_case__ ( self : int , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float ) -> str: '''simple docstring''' _UpperCamelCase = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def snake_case__ ( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Any ) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def snake_case__ ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , 
lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ ) _UpperCamelCase = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) _UpperCamelCase = after_output[0] _UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str=None , **lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = {'''vision_model''': vision_model, '''text_model''': text_model} _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__ ) _UpperCamelCase = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ ) _UpperCamelCase = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _UpperCamelCase = to_atuple(vision_model.config.image_size ) _UpperCamelCase = to_atuple(vision_model.config.patch_size ) _UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _UpperCamelCase = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _UpperCamelCase = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Tuple: '''simple docstring''' pt_model.to(lowerCAmelCase__ ) pt_model.eval() # prepare inputs _UpperCamelCase = inputs_dict _UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _UpperCamelCase = pt_model(**lowerCAmelCase__ ).to_tuple() _UpperCamelCase = fx_model(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase__ ) _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ ) _UpperCamelCase = 
fx_model_loaded(**lowerCAmelCase__ ).to_tuple()
        self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(lowerCAmelCase__ , pt_output.numpy() , 4e-2 )

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(lowerCAmelCase__ )
            _UpperCamelCase = VisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ , from_flax=lowerCAmelCase__ )

            pt_model_loaded.to(lowerCAmelCase__ )
            pt_model_loaded.eval()

            with torch.no_grad():
                _UpperCamelCase = pt_model_loaded(**lowerCAmelCase__ ).to_tuple()

        self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(lowerCAmelCase__ , pt_output_loaded.numpy() , 4e-2 )

    def snake_case__ ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int ) -> Any:
        '''simple docstring'''
        _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
        _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )

        _UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase__ )
        _UpperCamelCase = fx_state

        self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    def snake_case__ ( self : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] ) -> str:
        '''simple docstring'''
        _UpperCamelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__ )
        _UpperCamelCase = VisionTextDualEncoderModel(lowerCAmelCase__ )
        _UpperCamelCase = FlaxVisionTextDualEncoderModel(lowerCAmelCase__ )

        _UpperCamelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , fx_model.params )

        self.check_pt_flax_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    def snake_case__ ( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**lowerCAmelCase__ )

    def snake_case__ ( self : List[Any] ) -> int:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__ )

    def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_save_load(**lowerCAmelCase__ )

    def snake_case__ ( self : Any ) -> Tuple:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**lowerCAmelCase__ )

    @is_pt_flax_cross_test
    def snake_case__ ( self : int ) -> List[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase = config_inputs_dict.pop('''vision_config''' )
        _UpperCamelCase = config_inputs_dict.pop('''text_config''' )
        _UpperCamelCase = config_inputs_dict

        self.check_equivalence_pt_to_flax(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
        self.check_equivalence_flax_to_pt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    @slow
    def snake_case__ ( self : List[Any] ) -> Any:
        '''simple docstring'''
        _UpperCamelCase , _UpperCamelCase = self.get_pretrained_model_and_inputs()

        _UpperCamelCase = model_a(**lowerCAmelCase__ )
        _UpperCamelCase = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(lowerCAmelCase__ )
            _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__ )

            _UpperCamelCase = model_a(**lowerCAmelCase__ )
            _UpperCamelCase = after_outputs[0]
            _UpperCamelCase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(lowerCAmelCase__ , 1e-5 )


@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    def snake_case__ ( self : Tuple ) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
        _UpperCamelCase = 13
        _UpperCamelCase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _UpperCamelCase = random_attention_mask([batch_size, 4] )
        _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}

        return model, inputs

    def snake_case__ ( self : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
        '''simple docstring'''
        _UpperCamelCase = FlaxViTModel(lowerCAmelCase__ )
        _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
        return vision_model, text_model

    def snake_case__ ( self : str ) -> Tuple:
        '''simple docstring'''
        _UpperCamelCase = FlaxViTModelTester(self )
        _UpperCamelCase = FlaxBertModelTester(self )
        _UpperCamelCase = vit_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = bert_model_tester.prepare_config_and_inputs()

        _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs

        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    def snake_case__ ( self : List[str] ) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=lowerCAmelCase__ , text_from_pt=lowerCAmelCase__ , )
        _UpperCamelCase = 13
        _UpperCamelCase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        _UpperCamelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        _UpperCamelCase = random_attention_mask([batch_size, 4] )
        _UpperCamelCase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}

        return model, inputs

    def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = FlaxCLIPVisionModel(lowerCAmelCase__ )
        _UpperCamelCase = FlaxBertModel(lowerCAmelCase__ )
        return vision_model, text_model

    def snake_case__ ( self : List[str] ) -> Dict:
        '''simple docstring'''
        _UpperCamelCase = FlaxCLIPVisionModelTester(self )
        _UpperCamelCase = FlaxBertModelTester(self )
        _UpperCamelCase = clip_model_tester.prepare_config_and_inputs()
        _UpperCamelCase = bert_model_tester.prepare_config_and_inputs()

        _UpperCamelCase , _UpperCamelCase = vision_config_and_inputs

        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : List[Any] ) -> Any:
        '''simple docstring'''
        _UpperCamelCase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
        _UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )

        _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )

        _UpperCamelCase = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''np''' )

        _UpperCamelCase = model(**lowerCAmelCase__ )

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )

        _UpperCamelCase = np.array([[1.2284727, 0.3104122]] )

        self.assertTrue(np.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 ) )
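# --- Illustrative usage sketch (not part of the test file above) ---
# A minimal standalone version of the zero-shot image/text matching flow the
# slow integration test above exercises. The checkpoint and processor names are
# taken from that test; the local image path is a hypothetical stand-in.
import jax
from PIL import Image
from transformers import FlaxVisionTextDualEncoderModel, VisionTextDualEncoderProcessor

model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

image = Image.open("cat.png")  # hypothetical local image
inputs = processor(
    text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
)
outputs = model(**inputs)

# logits_per_image has shape (num_images, num_texts); softmax over texts gives match probabilities
probs = jax.nn.softmax(outputs.logits_per_image, axis=-1)
print(probs)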
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
_a = logging.get_logger()


@dataclass
class __lowerCamelCase :
    """simple docstring"""

    UpperCamelCase__ = 42
    UpperCamelCase__ = field(default_factory=snake_case__)
    UpperCamelCase__ = field(default_factory=snake_case__)

    def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowerCAmelCase__ )

    def __call__( self , UpperCAmelCase ):
        """simple docstring"""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowerCAmelCase__ )
        [x.remove() for x in self.handles]
        return self

    @property
    def UpperCamelCase ( self ):
        """simple docstring"""
        return list(filter(lambda UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )


@dataclass
class __lowerCamelCase :
    """simple docstring"""

    UpperCamelCase__ = 42
    UpperCamelCase__ = 42
    UpperCamelCase__ = 0
    UpperCamelCase__ = field(default_factory=snake_case__)
    UpperCamelCase__ = field(default_factory=snake_case__)

    def __call__( self , UpperCAmelCase ):
        """simple docstring"""
        _UpperCAmelCase = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
        _UpperCAmelCase = Tracker(self.src )(lowerCAmelCase__ ).parametrized

        _UpperCAmelCase = list(filter(lambda UpperCAmelCase : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
        _UpperCAmelCase = list(filter(lambda UpperCAmelCase : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )

        if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
            raise Exception(
                F"""Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while"""
                F""" destination module has {len(lowerCAmelCase__ )}.""" )

        for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F"""Transfered from={src_m} to={dest_m}""" )


def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = True )-> int:
    """simple docstring"""
    print(F"""Converting {name}...""" )
    with torch.no_grad():
        _UpperCAmelCase = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
        _UpperCAmelCase = ResNetForImageClassification(__lowerCAmelCase ).eval()
        _UpperCAmelCase = ModuleTransfer(src=__lowerCAmelCase , dest=__lowerCAmelCase )
        _UpperCAmelCase = torch.randn((1, 3, 224, 224) )
        module_transfer(__lowerCAmelCase )

    assert torch.allclose(from_model(__lowerCAmelCase ) , our_model(__lowerCAmelCase ).logits ), "The model logits don't match the original one."
_UpperCAmelCase = F"""resnet{"-".join(name.split("resnet" ) )}""" print(__lowerCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=__lowerCAmelCase , ) # we can use the convnext one _UpperCAmelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=__lowerCAmelCase , ) print(F"""Pushed {checkpoint_name}""" ) def __A ( __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True )-> Optional[int]: """simple docstring""" _UpperCAmelCase = 'imagenet-1k-id2label.json' _UpperCAmelCase = 1_000 _UpperCAmelCase = (1, num_labels) _UpperCAmelCase = 'huggingface/label-files' _UpperCAmelCase = num_labels _UpperCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) ) _UpperCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = partial(__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase ) _UpperCAmelCase = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(__lowerCAmelCase , names_to_config[model_name] , __lowerCAmelCase , __lowerCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) return config, expected_shape if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) _a = parser.parse_args() _a = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''

import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : str=99 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Optional[int]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : Tuple=37 , lowerCAmelCase__ : int="gelu" , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : int=16 , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Any=4 , ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = seq_length
        _UpperCamelCase = is_training
        _UpperCamelCase = use_attention_mask
        _UpperCamelCase = use_token_type_ids
        _UpperCamelCase = use_labels
        _UpperCamelCase = vocab_size
        _UpperCamelCase = hidden_size
        _UpperCamelCase = num_hidden_layers
        _UpperCamelCase = num_attention_heads
        _UpperCamelCase = intermediate_size
        _UpperCamelCase = hidden_act
        _UpperCamelCase = hidden_dropout_prob
        _UpperCamelCase = attention_probs_dropout_prob
        _UpperCamelCase = max_position_embeddings
        _UpperCamelCase = type_vocab_size
        _UpperCamelCase = type_sequence_label_size
        _UpperCamelCase = initializer_range
        _UpperCamelCase = num_choices

    def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _UpperCamelCase = None
        if self.use_attention_mask:
            _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )

        _UpperCamelCase = None
        if self.use_token_type_ids:
            _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        _UpperCamelCase = AlbertConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=lowerCAmelCase__ ,
            initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def snake_case__ ( self : Union[str, Any] ) -> str:
        '''simple docstring'''
        _UpperCamelCase = self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
        _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict


@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    _snake_case : Dict = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def snake_case__ ( self : Optional[int] ) -> Dict:
        '''simple docstring'''
        _UpperCamelCase = FlaxAlbertModelTester(self )

    @slow
    def snake_case__ ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            _UpperCamelCase = model_class_name.from_pretrained('''albert-base-v2''' )
            _UpperCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCAmelCase__ )


@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        _UpperCamelCase = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        _UpperCamelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _UpperCamelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
        _UpperCamelCase = (1, 11, 768)
        self.assertEqual(output.shape , lowerCAmelCase__ )
        _UpperCamelCase = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCAmelCase__ , atol=1e-4 ) )
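# --- Illustrative usage sketch (not part of the test file above) ---
# The inference path the slow test above checks, with tokenization added for
# convenience; "albert-base-v2" comes from the test, the rest is standard
# transformers usage.
from transformers import AlbertTokenizerFast, FlaxAlbertModel

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
model = FlaxAlbertModel.from_pretrained("albert-base-v2")

inputs = tokenizer("Hello, ALBERT!", return_tensors="np")
last_hidden_state = model(**inputs)[0]
print(last_hidden_state.shape)  # (batch_size, seq_len, 768) for the base model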
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase__ = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = ['GLPNFeatureExtractor'] UpperCAmelCase__ = ['GLPNImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ = [ 'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST', 'GLPNForDepthEstimation', 'GLPNLayer', 'GLPNModel', 'GLPNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class __lowerCAmelCase ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int]=7 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : Optional[Any]=18 , lowerCAmelCase__ : Union[str, Any]=30 , lowerCAmelCase__ : Any=400 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : str=[0.5, 0.5, 0.5] , lowerCAmelCase__ : int=[0.5, 0.5, 0.5] , ) -> List[str]:
        '''simple docstring'''
        _UpperCamelCase = size if size is not None else {'''shortest_edge''': 18}
        _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = num_channels
        _UpperCamelCase = image_size
        _UpperCamelCase = min_resolution
        _UpperCamelCase = max_resolution
        _UpperCamelCase = do_resize
        _UpperCamelCase = size
        _UpperCamelCase = do_center_crop
        _UpperCamelCase = crop_size
        _UpperCamelCase = do_normalize
        _UpperCamelCase = image_mean
        _UpperCamelCase = image_std

    def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
    """simple docstring"""

    _snake_case : Tuple = LevitImageProcessor if is_vision_available() else None

    def snake_case__ ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = LevitImageProcessingTester(self )

    @property
    def snake_case__ ( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case__ ( self : Tuple ) -> List[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )

    def snake_case__ ( self : str ) -> Union[str, Any]:
        '''simple docstring'''
        _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )

        _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )

    def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        pass

    def snake_case__ ( self : Dict ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image )

        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case__ ( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , np.ndarray )

        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

    def snake_case__ ( self : Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        _UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )

        # Test not batched input
        _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )

        # Test batched
        _UpperCamelCase = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
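# --- Illustrative usage sketch (not part of the test file above) ---
# What the tests above verify in miniature: the processor resizes, center-crops
# and normalizes an image into a fixed-shape pixel_values tensor.
# "facebook/levit-128S" is an assumed public checkpoint name.
from PIL import Image
from transformers import LevitImageProcessor

image_processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
image = Image.new("RGB", (640, 480))  # dummy image for illustration
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # e.g. (1, 3, 224, 224) after resize + center crop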