Dataset schema (reconstructed from the flattened column header; each record below carries the five fields in this order):

    code                     string   lengths 87 to 55.2k
    code_codestyle           int64    0 to 349
    style_context            string   lengths 135 to 49.1k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
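The records that follow are raw rows from this dataset: each code and style_context value is a complete Python source file whose identifiers have been machine-obfuscated (functions become _snake_case / snake_case__, variables become A__ / lowercase_, and digits are swapped for letters, e.g. nn.BatchNormad for nn.BatchNorm2d), with the original newlines stripped by the export. As a minimal sketch, rows with this schema could be inspected with the datasets library; the repository ID below is a hypothetical placeholder, since the dump does not name the dataset:

from datasets import load_dataset

# "user/code-style-pairs" is a placeholder, not the real dataset ID
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"])  # style IDs, 0-349
print(row["label"])                # binary label (0 or 1 per the schema)
print(row["code"][:200])           # first characters of the obfuscated source file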
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : List[Any]=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : List[str]=10 , _snake_case : List[Any]=[8, 16, 32, 64] , _snake_case : Any=[1, 1, 2, 1] , _snake_case : Any=True , _snake_case : List[Any]=True , _snake_case : Union[str, Any]="relu" , _snake_case : int=3 , _snake_case : Tuple=None , _snake_case : Union[str, Any]=["stage2", "stage3", "stage4"] , _snake_case : Tuple=[2, 3, 4] , _snake_case : List[Any]=1 , ): __lowercase : Dict = parent __lowercase : Union[str, Any] = batch_size __lowercase : List[Any] = image_size __lowercase : Any = num_channels __lowercase : Dict = embeddings_size __lowercase : Union[str, Any] = hidden_sizes __lowercase : Optional[Any] = depths __lowercase : int = is_training __lowercase : Optional[Any] = use_labels __lowercase : Tuple = hidden_act __lowercase : str = num_labels __lowercase : str = scope __lowercase : Dict = len(lowercase_ ) __lowercase : Optional[Any] = out_features __lowercase : int = out_indices __lowercase : Union[str, Any] = num_groups def snake_case_ ( self : Any ): __lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : List[str] = None if self.use_labels: __lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) __lowercase : Tuple = self.get_config() return config, pixel_values, labels def snake_case_ ( self : List[str] ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def snake_case_ ( self : Dict , _snake_case : int , _snake_case : Optional[int] , _snake_case : Optional[Any] ): __lowercase : Any = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() __lowercase : Optional[Any] = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case_ ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Tuple , _snake_case : Tuple ): __lowercase : int = self.num_labels __lowercase : List[Any] = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() __lowercase : Union[str, Any] = model(lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case_ ( self : int , _snake_case : str , _snake_case : Dict , _snake_case : List[str] ): __lowercase : List[str] = BitBackbone(config=lowercase_ ) 
model.to(lowercase_ ) model.eval() __lowercase : Any = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __lowercase : Dict = None __lowercase : Optional[Any] = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() __lowercase : Any = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def snake_case_ ( self : Tuple ): __lowercase : Any = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs __lowercase : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" A__ : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () A__ : str = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) A__ : Optional[int] = False A__ : List[Any] = False A__ : str = False A__ : Optional[Any] = False A__ : Optional[Any] = False def snake_case_ ( self : Union[str, Any] ): __lowercase : int = BitModelTester(self ) __lowercase : str = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def snake_case_ ( self : Dict ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self : Union[str, Any] ): return @unittest.skip(reason='''Bit does not output attentions''' ) def snake_case_ ( self : Tuple ): pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def snake_case_ ( self : Dict ): pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def snake_case_ ( self : Optional[Any] ): pass def snake_case_ ( self : List[str] ): __lowercase , __lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : int = model_class(lowercase_ ) __lowercase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : List[str] = [*signature.parameters.keys()] __lowercase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowercase_ ) def snake_case_ ( self : List[Any] ): __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case_ ( self : List[str] ): __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ 
) def snake_case_ ( self : Any ): __lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Tuple = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) def snake_case_ ( self : Dict ): def check_hidden_states_output(_snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Dict ): __lowercase : Optional[Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): __lowercase : Tuple = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) __lowercase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __lowercase : str = self.model_tester.num_stages self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __lowercase , __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : List[Any] = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __lowercase : Any = layer_type __lowercase : Dict = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : str = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def snake_case_ ( self : int ): pass def snake_case_ ( self : Any ): __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case_ ( self : str ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Any = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def UpperCAmelCase_ ( ) -> str: __lowercase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case_ ( self : Tuple ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case_ ( self : Optional[int] ): __lowercase : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) __lowercase : Dict = self.default_image_processor __lowercase : int = prepare_img() __lowercase : Dict = image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ ) # forward pass with torch.no_grad(): __lowercase : Union[str, Any] = model(**lowercase_ ) # verify the logits __lowercase : Optional[int] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowercase_ ) __lowercase : List[str] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) ) @require_torch class 
__lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" A__ : List[Any] = (BitBackbone,) if is_torch_available() else () A__ : int = BitConfig A__ : Optional[Any] = False def snake_case_ ( self : Tuple ): __lowercase : Optional[Any] = BitModelTester(self )
code_codestyle: 156
def _snake_case(
    SCREAMING_SNAKE_CASE__: float,
    SCREAMING_SNAKE_CASE__: float,
    SCREAMING_SNAKE_CASE__: float,
    SCREAMING_SNAKE_CASE__: float,
    SCREAMING_SNAKE_CASE__: float,
) -> float:
    '''simple docstring'''
    A__ = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        A__ = 1 - (matter_density + radiation_density + dark_energy)
    A__ = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    A__ = hubble_constant * e_a ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    lowercase_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
style_context_codestyle: 7
label: 0
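For readability, a de-obfuscated sketch of the Friedmann-equation helper in the style_context above (the restored names are assumptions), computing H(z) = H0 * sqrt(Omega_r(1+z)^4 + Omega_m(1+z)^3 + Omega_k(1+z)^2 + Omega_L):

def hubble_parameter(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    curvature = 1 - (matter_density + radiation_density + dark_energy)  # Omega_k
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** 0.5

print(hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0))  # ~68.3 (flat LCDM at z = 0)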
"""simple docstring""" import math import tensorflow as tf from packaging import version def a__ ( snake_case__ ) -> int: lowerCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) lowerCamelCase = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def a__ ( snake_case__ ) -> Optional[Any]: lowerCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) lowerCamelCase = tf.cast(math.pi , x.dtype ) lowerCamelCase = tf.cast(0.04_4715 , x.dtype ) lowerCamelCase = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE__ , 3 )) )) return x * cdf def a__ ( snake_case__ ) -> Optional[Any]: lowerCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE__ ) ) def a__ ( snake_case__ ) -> int: lowerCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) lowerCamelCase = tf.cast(0.04_4715 , x.dtype ) lowerCamelCase = tf.cast(0.79_7884_5608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def a__ ( snake_case__ ) -> Union[str, Any]: lowerCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ ) lowerCamelCase = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def a__ ( snake_case__ ) -> Optional[int]: return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE__ ) , -10 , 10 ) def a__ ( snake_case__ , snake_case__=-1 ) -> List[Any]: lowerCamelCase , lowerCamelCase = tf.split(SCREAMING_SNAKE_CASE__ , 2 , axis=SCREAMING_SNAKE_CASE__ ) return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE__ ) if version.parse(tf.version.VERSION) >= version.parse("""2.4"""): def a__ ( snake_case__ ) -> Dict: return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE__ , approximate=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase : int = tf.keras.activations.gelu lowerCAmelCase : List[str] = approximate_gelu_wrap else: lowerCAmelCase : str = _gelu lowerCAmelCase : List[str] = _gelu_new lowerCAmelCase : Tuple = { """gelu""": gelu, """gelu_10""": gelu_aa, """gelu_fast""": gelu_fast, """gelu_new""": gelu_new, """glu""": glu, """mish""": mish, """quick_gelu""": quick_gelu, """relu""": tf.keras.activations.relu, """sigmoid""": tf.keras.activations.sigmoid, """silu""": tf.keras.activations.swish, """swish""": tf.keras.activations.swish, """tanh""": tf.keras.activations.tanh, } def a__ ( snake_case__ ) -> int: if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
code_codestyle: 291
from typing import Union

import fire
import torch
from tqdm import tqdm


def _snake_case(
    SCREAMING_SNAKE_CASE__: str,
    SCREAMING_SNAKE_CASE__: str = "cpu",
    SCREAMING_SNAKE_CASE__: Union[str, None] = None,
) -> None:
    '''simple docstring'''
    A__ = torch.load(SCREAMING_SNAKE_CASE__, map_location=SCREAMING_SNAKE_CASE__)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(SCREAMING_SNAKE_CASE__, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        A__ = v.half()
    if save_path is None:  # overwrite src_path
        A__ = src_path
    torch.save(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)


if __name__ == "__main__":
    fire.Fire(convert)
style_context_codestyle: 7
label: 0
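A de-obfuscated sketch (restored names are assumptions) of the converter in the style_context above: load a saved state dict, cast every tensor to float16, and write it back:

from typing import Union

import torch
from tqdm import tqdm

def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()   # cast each tensor to float16
    if save_path is None:          # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)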
'''simple docstring'''

def snake_case_(_a: list, _a: list):
    _validate_point(SCREAMING_SNAKE_CASE__)
    _validate_point(SCREAMING_SNAKE_CASE__)
    if len(SCREAMING_SNAKE_CASE__) != len(SCREAMING_SNAKE_CASE__):
        raise ValueError('''Both points must be in the same n-dimensional space''')
    return float(sum(abs(a - b) for a, b in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)))


def snake_case_(_a: list[float]):
    if point:
        if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__):
            for item in point:
                if not isinstance(SCREAMING_SNAKE_CASE__, (int, float)):
                    UpperCAmelCase = (
                        '''Expected a list of numbers as input, found '''
                        F"{type(SCREAMING_SNAKE_CASE__).__name__}"
                    )
                    raise TypeError(SCREAMING_SNAKE_CASE__)
        else:
            UpperCAmelCase = F"Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE__).__name__}"
            raise TypeError(SCREAMING_SNAKE_CASE__)
    else:
        raise ValueError('''Missing an input''')


def snake_case_(_a: list, _a: list):
    _validate_point(SCREAMING_SNAKE_CASE__)
    _validate_point(SCREAMING_SNAKE_CASE__)
    if len(SCREAMING_SNAKE_CASE__) != len(SCREAMING_SNAKE_CASE__):
        raise ValueError('''Both points must be in the same n-dimensional space''')
    return float(sum(abs(x - y) for x, y in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 34
import os

# Precomputes a list of the 100 first triangular numbers
lowercase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def _snake_case() -> int:
    '''simple docstring'''
    A__ = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE__))
    A__ = os.path.join(SCREAMING_SNAKE_CASE__, 'words.txt')
    A__ = ''
    with open(SCREAMING_SNAKE_CASE__) as f:
        A__ = f.readline()
    A__ = [word.strip('"') for word in words.strip('\r\n').split(',')]
    A__ = [
        word
        for word in [sum(ord(SCREAMING_SNAKE_CASE__) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(SCREAMING_SNAKE_CASE__)


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 7
label: 0
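A de-obfuscated sketch (names assumed) of the Manhattan-distance function that this row's code obfuscates: the sum of coordinate-wise absolute differences, after checking both points have the same dimension:

def manhattan_distance(point_a: list, point_b: list) -> float:
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

print(manhattan_distance([1, 1], [9, 4]))  # 11.0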
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = {'vocab_file': 'spiece.model'} UpperCAmelCase_ = { 'vocab_file': { 'bert_for_seq_generation': ( 'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model' ), } } UpperCAmelCase_ = {'bert_for_seq_generation': 512} class lowercase__ ( _UpperCAmelCase ): '''simple docstring''' a : int = VOCAB_FILES_NAMES a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : int = [] a : Tuple = ["input_ids", "attention_mask"] def __init__( self, __magic_name__, __magic_name__="<s>", __magic_name__="</s>", __magic_name__="<unk>", __magic_name__="<pad>", __magic_name__="<::::>", __magic_name__ = None, **__magic_name__, ) -> None: """simple docstring""" UpperCamelCase__ : Dict = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowercase_, eos_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, sep_token=lowercase_, sp_model_kwargs=self.sp_model_kwargs, **lowercase_, ) UpperCamelCase__ : Dict = vocab_file UpperCamelCase__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) @property def UpperCamelCase__ ( self ) -> str: """simple docstring""" return self.sp_model.get_piece_size() def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase__ : Optional[int] = self.__dict__.copy() UpperCamelCase__ : Any = None return state def __setstate__( self, __magic_name__ ) -> List[Any]: """simple docstring""" UpperCamelCase__ : List[str] = d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): UpperCamelCase__ : List[Any] = {} UpperCamelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCamelCase__ ( self, __magic_name__ ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowercase_, out_type=lowercase_ ) def UpperCamelCase__ ( self, __magic_name__ ) -> Any: """simple docstring""" return self.sp_model.piece_to_id(lowercase_ ) def UpperCamelCase__ ( self, __magic_name__ ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ : str = self.sp_model.IdToPiece(lowercase_ ) return token def UpperCamelCase__ ( self, __magic_name__ ) -> int: """simple docstring""" UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token UpperCamelCase__ : Union[str, Any] = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowercase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return UpperCamelCase__ : Dict = os.path.join( lowercase_, (filename_prefix + '''-''' if filename_prefix else 
'''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_, '''wb''' ) as fi: UpperCamelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
code_codestyle: 201
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionAttendAndExcitePipeline lowerCamelCase = False lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def snake_case__ ( cls : Any )-> Optional[Any]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Dict: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,) A__ = DDIMScheduler( beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = A__ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 6_4, 6_4, 3) ) A__ = np.array( 
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def snake_case__ ( self : str )-> int: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Any )-> Optional[int]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : int )-> List[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = torch.manual_seed(5_1 ) A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa ) pipe.to('cuda' ) A__ = 'a painting of an elephant with glasses' A__ = [5, 7] A__ = pipe( prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0] A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
style_context_codestyle: 7
label: 0
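A de-obfuscated sketch (restored names are assumptions) of the convert_tokens_to_string logic in the SentencePiece tokenizer row above: special tokens are emitted verbatim while ordinary pieces are batched through the SentencePiece decoder:

def convert_tokens_to_string(sp_model, all_special_tokens, tokens):
    current_sub_tokens, out_string = [], ""
    for token in tokens:
        # special tokens are not decoded with the sentencepiece model
        if token in all_special_tokens:
            out_string += sp_model.decode(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    out_string += sp_model.decode(current_sub_tokens)
    return out_string.strip()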
"""simple docstring""" from math import sqrt def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def UpperCAmelCase__ (lowerCAmelCase_ = 1_0001 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 1 while count != nth and number < 3: number += 1 if is_prime(SCREAMING_SNAKE_CASE__ ): count += 1 while count != nth: number += 2 if is_prime(SCREAMING_SNAKE_CASE__ ): count += 1 return number if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 54
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowercase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : tuple , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , ) -> Union[str, Any]: '''simple docstring''' output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , use_external_data_format=SCREAMING_SNAKE_CASE__ , enable_onnx_checker=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) else: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool = False ) -> Tuple: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): A__ = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: A__ = 'cpu' A__ = Path(SCREAMING_SNAKE_CASE__ ) # VAE DECODER A__ = AutoencoderKL.from_pretrained(model_path + '/vae' ) A__ = vae_decoder.config.latent_channels # forward only through the decoder part A__ = vae_decoder.decode onnx_export( SCREAMING_SNAKE_CASE__ , model_args=( torch.randn(1 , SCREAMING_SNAKE_CASE__ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=SCREAMING_SNAKE_CASE__ , ) del vae_decoder if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowercase_ = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
style_context_codestyle: 7
label: 0
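A de-obfuscated sketch of the 6k +/- 1 primality test that this row's code obfuscates (restored names assumed): every prime greater than 3 has the form 6k +/- 1, so after handling 2 and 3 only those candidates need trial division:

from math import sqrt

def is_prime(number: int) -> bool:
    if 1 < number < 4:       # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

print(is_prime(10_001))  # False (10001 = 73 * 137)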
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a_ = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a_ = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a_ = { """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a_ = { """num_train_timesteps""": 40, """sigma_min""": 0.002, """sigma_max""": 80.0, } a_ = { """num_train_timesteps""": 201, """sigma_min""": 0.002, """sigma_max""": 80.0, } a_ = { """num_train_timesteps""": 151, """sigma_min""": 0.002, """sigma_max""": 80.0, } def __lowercase ( snake_case_ : List[Any] ) ->List[Any]: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def __lowercase ( snake_case_ : Dict ,snake_case_ : Optional[int] ,snake_case_ : List[Any] ,snake_case_ : Optional[int] ,snake_case_ : List[Any]=False ) ->Any: '''simple docstring''' __A : Optional[int] = checkpoint[F"""{old_prefix}.in_layers.0.weight"""] __A : List[Any] = checkpoint[F"""{old_prefix}.in_layers.0.bias"""] __A : Tuple = checkpoint[F"""{old_prefix}.in_layers.2.weight"""] __A : int = checkpoint[F"""{old_prefix}.in_layers.2.bias"""] __A : List[Any] = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""] __A : Union[str, Any] = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""] __A : List[str] = checkpoint[F"""{old_prefix}.out_layers.0.weight"""] __A : int = checkpoint[F"""{old_prefix}.out_layers.0.bias"""] __A : Dict = checkpoint[F"""{old_prefix}.out_layers.3.weight"""] __A : Optional[int] = checkpoint[F"""{old_prefix}.out_layers.3.bias"""] if has_skip: __A : int = checkpoint[F"""{old_prefix}.skip_connection.weight"""] __A : Optional[int] = 
checkpoint[F"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def __lowercase ( snake_case_ : List[str] ,snake_case_ : List[str] ,snake_case_ : List[Any] ,snake_case_ : List[Any] ,snake_case_ : List[str]=None ) ->Tuple: '''simple docstring''' __A , __A , __A : Dict = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 ,dim=0 ) __A , __A , __A : List[Any] = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 ,dim=0 ) __A : Union[str, Any] = checkpoint[F"""{old_prefix}.norm.weight"""] __A : int = checkpoint[F"""{old_prefix}.norm.bias"""] __A : str = weight_q.squeeze(-1 ).squeeze(-1 ) __A : Optional[Any] = bias_q.squeeze(-1 ).squeeze(-1 ) __A : Optional[int] = weight_k.squeeze(-1 ).squeeze(-1 ) __A : int = bias_k.squeeze(-1 ).squeeze(-1 ) __A : Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) __A : Optional[int] = bias_v.squeeze(-1 ).squeeze(-1 ) __A : Optional[Any] = ( checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) __A : Tuple = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __lowercase ( snake_case_ : str ,snake_case_ : Dict ) ->str: '''simple docstring''' __A : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ ,map_location='''cpu''' ) __A : str = {} __A : List[str] = checkpoint['''time_embed.0.weight'''] __A : Optional[int] = checkpoint['''time_embed.0.bias'''] __A : int = checkpoint['''time_embed.2.weight'''] __A : int = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: __A : Optional[int] = checkpoint['''label_emb.weight'''] __A : Dict = checkpoint['''input_blocks.0.0.weight'''] __A : str = checkpoint['''input_blocks.0.0.bias'''] __A : str = unet_config['''down_block_types'''] __A : Optional[int] = unet_config['''layers_per_block'''] __A : str = unet_config['''attention_head_dim'''] __A : str = unet_config['''block_out_channels'''] __A : List[Any] = 1 __A : List[Any] = channels_list[0] for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ): __A : List[str] = channels_list[i] __A : Union[str, Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(SCREAMING_SNAKE_CASE__ ): __A : List[Any] = F"""down_blocks.{i}.resnets.{j}""" __A : List[str] = F"""input_blocks.{current_layer}.0""" __A : str = True if j == 0 and downsample_block_has_skip else False __A : Tuple = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,has_skip=SCREAMING_SNAKE_CASE__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(SCREAMING_SNAKE_CASE__ ): __A : Optional[Any] = F"""down_blocks.{i}.resnets.{j}""" __A : Any = F"""input_blocks.{current_layer}.0""" __A : Dict = True if j == 0 and downsample_block_has_skip else False __A : str = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,has_skip=SCREAMING_SNAKE_CASE__ ) __A : Union[str, Any] = F"""down_blocks.{i}.attentions.{j}""" __A : str = F"""input_blocks.{current_layer}.1""" __A : List[Any] = convert_attention( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) current_layer += 1 if i != len(SCREAMING_SNAKE_CASE__ ) - 1: __A : List[Any] = F"""down_blocks.{i}.downsamplers.0""" __A : Dict = F"""input_blocks.{current_layer}.0""" __A : Optional[int] = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) current_layer += 1 __A : Union[str, Any] = 
current_channels # hardcoded the mid-block for now __A : Optional[Any] = '''mid_block.resnets.0''' __A : int = '''middle_block.0''' __A : int = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) __A : List[Any] = '''mid_block.attentions.0''' __A : List[str] = '''middle_block.1''' __A : int = convert_attention(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) __A : Any = '''mid_block.resnets.1''' __A : Optional[Any] = '''middle_block.2''' __A : Any = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) __A : int = 0 __A : Optional[int] = unet_config['''up_block_types'''] for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __A : Optional[int] = F"""up_blocks.{i}.resnets.{j}""" __A : Any = F"""output_blocks.{current_layer}.0""" __A : Dict = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,has_skip=SCREAMING_SNAKE_CASE__ ) current_layer += 1 if i != len(SCREAMING_SNAKE_CASE__ ) - 1: __A : Optional[Any] = F"""up_blocks.{i}.upsamplers.0""" __A : Optional[Any] = F"""output_blocks.{current_layer-1}.1""" __A : Optional[Any] = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __A : Tuple = F"""up_blocks.{i}.resnets.{j}""" __A : Union[str, Any] = F"""output_blocks.{current_layer}.0""" __A : Optional[int] = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,has_skip=SCREAMING_SNAKE_CASE__ ) __A : Union[str, Any] = F"""up_blocks.{i}.attentions.{j}""" __A : str = F"""output_blocks.{current_layer}.1""" __A : Optional[int] = convert_attention( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) current_layer += 1 if i != len(SCREAMING_SNAKE_CASE__ ) - 1: __A : str = F"""up_blocks.{i}.upsamplers.0""" __A : List[Any] = F"""output_blocks.{current_layer-1}.2""" __A : List[Any] = convert_resnet(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) __A : List[Any] = checkpoint['''out.0.weight'''] __A : Optional[Any] = checkpoint['''out.0.bias'''] __A : Union[str, Any] = checkpoint['''out.2.weight'''] __A : int = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") a_ = parser.parse_args() a_ = strabool(args.class_cond) a_ = os.path.basename(args.unet_path) print(f'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: a_ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a_ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a_ = TEST_UNET_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: a_ = None a_ = 
con_pt_to_diffuser(args.unet_path, unet_config) a_ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a_ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a_ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a_ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') a_ = CMStochasticIterativeScheduler(**scheduler_config) a_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
code_codestyle: 179
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
style_context_codestyle: 7
label: 0
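The recurring pattern in the scheduler tests above is a denoising smoke loop; a minimal de-obfuscated sketch (names assumed) of that full_loop helper, using the diffusers scheduler API the tests exercise:

# Step a diffusers scheduler through all timesteps with a dummy model
# and return the final sample.
def full_loop(scheduler, model, sample, num_inference_steps=10):
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        residual = model(sample, t)                               # predicted noise
        sample = scheduler.step(residual, t, sample).prev_sample  # one denoising step
    return sample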
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)


@dataclass
class UpperCAmelCase(_UpperCAmelCase):
    '''simple docstring'''

    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42
    lowerCAmelCase_ = 42


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
code_codestyle: 187
class A : """simple docstring""" def __init__( self : Any,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : List[str] )-> List[Any]: '''simple docstring''' A__ = name A__ = value A__ = weight def __repr__( self : int )-> Tuple: '''simple docstring''' return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def snake_case__ ( self : Any )-> str: '''simple docstring''' return self.value def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' return self.name def snake_case__ ( self : Any )-> Dict: '''simple docstring''' return self.weight def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' return self.value / self.weight def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: '''simple docstring''' A__ = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Any: '''simple docstring''' A__ = sorted(SCREAMING_SNAKE_CASE__ , key=SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ) A__ = [] A__ , A__ = 0.0, 0.0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _snake_case( ) -> Any: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 7
label: 0
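A de-obfuscated sketch (names assumed) of the greedy selection in the style_context above: sort the menu items by a key function (typically value/weight) and take items while the cost budget allows:

def greedy(items: list, max_cost: float, key_func) -> tuple:
    items_copy = sorted(items, key=key_func, reverse=True)
    result, total_value, total_cost = [], 0.0, 0.0
    for item in items_copy:
        if total_cost + item.get_weight() <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return result, total_value

# hypothetical usage with the Things-style objects defined in the row:
# chosen, value = greedy(menu, 100.0, lambda thing: thing.get_value())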
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
code_codestyle: 285
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class A ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'resnet' lowerCamelCase = ['basic', 'bottleneck'] def __init__( self : Optional[Any],lowercase_ : int=3,lowercase_ : List[str]=6_4,lowercase_ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8],lowercase_ : Tuple=[3, 4, 6, 3],lowercase_ : Union[str, Any]="bottleneck",lowercase_ : List[str]="relu",lowercase_ : Tuple=False,lowercase_ : List[str]=None,lowercase_ : List[Any]=None,**lowercase_ : str,)-> Optional[Any]: '''simple docstring''' super().__init__(**lowercase_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) A__ = num_channels A__ = embedding_size A__ = hidden_sizes A__ = depths A__ = layer_type A__ = hidden_act A__ = downsample_in_first_stage A__ = ['stem'] + [F'stage{idx}' for idx in range(1,len(lowercase_ ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=lowercase_,out_indices=lowercase_,stage_names=self.stage_names ) class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = version.parse('1.11' ) @property def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def snake_case__ ( self : Any )-> float: '''simple docstring''' return 1E-3
style_context_codestyle: 7
label: 0
import inspect
import unittest

import torch
import torch.nn as nn

from accelerate.hooks import (
    AlignDevicesHook,
    ModelHook,
    SequentialHook,
    add_hook_to_module,
    attach_align_device_hook,
    remove_hook_from_module,
    remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
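# Usage sketch (not part of the test file above): a minimal example of attaching a
# custom pre-forward hook to an arbitrary nn.Module with accelerate's public hooks
# API, as exercised by the tests. `ScaleInputHook` is a name introduced here for
# illustration only.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class ScaleInputHook(ModelHook):
    # Doubles the first positional input before the wrapped forward runs.
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] * 2,) + args[1:], kwargs


layer = nn.Linear(3, 3)
add_hook_to_module(layer, ScaleInputHook())
out = layer(torch.ones(1, 3))   # the forward pass sees the doubled input
remove_hook_from_module(layer)  # restores the original forward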
175
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
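# Usage sketch (assumes an environment with `transformers` installed): how the
# `feed_forward_proj` string above is parsed into the activation attributes.
from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
# "gated-gelu" splits into a gating flag and an activation name; the legacy
# spelling is then remapped to "gelu_new" for backwards compatibility.
assert config.is_gated_act is True
assert config.dense_act_fn == "gelu_new"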
7
0
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm lowerCamelCase_ = 2_0_4_8 lowerCamelCase_ = 4_0_9_6 lowerCamelCase_ = 4_2 lowerCamelCase_ = os.environ.pop("PROCESS_TRAIN", "false") lowerCamelCase_ = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def __lowerCamelCase ( a_ : str ) -> List[str]: def choose_first(a_ : str , a_ : List[Any]=False ): assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) == 1: __SCREAMING_SNAKE_CASE :List[Any] = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: __SCREAMING_SNAKE_CASE :Tuple = {k: [a[k]] for k in a} if len(a['''start_token'''] ) > 0: break return a __SCREAMING_SNAKE_CASE :List[Any] = {'''id''': example['''id''']} __SCREAMING_SNAKE_CASE :Dict = example['''annotations'''] __SCREAMING_SNAKE_CASE :Optional[Any] = annotation['''yes_no_answer'''] if 0 in yes_no_answer or 1 in yes_no_answer: __SCREAMING_SNAKE_CASE :int = ['''yes'''] if 1 in yes_no_answer else ['''no'''] __SCREAMING_SNAKE_CASE :Tuple = [] __SCREAMING_SNAKE_CASE :Tuple = [] __SCREAMING_SNAKE_CASE :Tuple = ['''<cls>'''] else: __SCREAMING_SNAKE_CASE :Optional[int] = ['''short'''] __SCREAMING_SNAKE_CASE :Optional[int] = choose_first(annotation['''short_answers'''] ) if len(out['''start_token'''] ) == 0: # answer will be long if short is not available __SCREAMING_SNAKE_CASE :Tuple = ['''long'''] __SCREAMING_SNAKE_CASE :Dict = choose_first(annotation['''long_answer'''] , is_long_answer=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = [] answer.update(SCREAMING_SNAKE_CASE__ ) # disregard some samples if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]: __SCREAMING_SNAKE_CASE :Optional[Any] = True else: __SCREAMING_SNAKE_CASE :Optional[Any] = False __SCREAMING_SNAKE_CASE :List[str] = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text'''] if not all(isinstance(answer[k] , SCREAMING_SNAKE_CASE__ ) for k in cols ): raise ValueError('''Issue in ID''' , example['''id'''] ) return answer def __lowerCamelCase ( a_ : List[Any] , a_ : Optional[Any]=False ) -> int: __SCREAMING_SNAKE_CASE :int = _get_single_answer(SCREAMING_SNAKE_CASE__ ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element __SCREAMING_SNAKE_CASE :str = example['''document''']['''tokens'''] __SCREAMING_SNAKE_CASE :str = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) return { "context": " ".join(SCREAMING_SNAKE_CASE__ ), "answer": { "start_token": -1_00, # ignore index in cross-entropy "end_token": -1_00, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples __SCREAMING_SNAKE_CASE :Optional[Any] = ['''start_token''', '''end_token'''] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 __SCREAMING_SNAKE_CASE :Optional[Any] = example['''document''']['''tokens'''] __SCREAMING_SNAKE_CASE :Optional[int] = answer['''start_token'''] __SCREAMING_SNAKE_CASE :List[Any] = answer['''end_token'''] __SCREAMING_SNAKE_CASE :int = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 __SCREAMING_SNAKE_CASE :Optional[int] = ''' '''.join(context[start_token:end_token] ) # checking above code if assertion: __SCREAMING_SNAKE_CASE :List[str] = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']] __SCREAMING_SNAKE_CASE :int = doc['''token'''][answer['''start_token'''] : answer['''end_token''']] __SCREAMING_SNAKE_CASE :Any = ''' '''.join([old[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) if not is_html[i]] ) if new != old: print('''ID:''' , example['''id'''] ) print('''New:''' , SCREAMING_SNAKE_CASE__ , end='''\n''' ) print('''Old:''' , SCREAMING_SNAKE_CASE__ , end='''\n\n''' ) return { "context": " ".join(SCREAMING_SNAKE_CASE__ ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def __lowerCamelCase ( a_ : Optional[int] , a_ : Optional[Any] , a_ : Optional[Any]=20_48 , a_ : str=40_96 , a_ : Optional[int]=True ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :List[Any] = get_context_and_ans(SCREAMING_SNAKE_CASE__ , assertion=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = out['''answer'''] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } __SCREAMING_SNAKE_CASE :Optional[Any] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids __SCREAMING_SNAKE_CASE :Dict = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element __SCREAMING_SNAKE_CASE :Optional[Any] = [] __SCREAMING_SNAKE_CASE :int = [] __SCREAMING_SNAKE_CASE :int = input_ids[:q_len] __SCREAMING_SNAKE_CASE :Union[str, Any] = range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , max_length - doc_stride ) for i in doc_start_indices: __SCREAMING_SNAKE_CASE :int = i + max_length - q_len __SCREAMING_SNAKE_CASE :Optional[int] = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['''category'''][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_00] * len(SCREAMING_SNAKE_CASE__ ), "end_token": [-1_00] * len(SCREAMING_SNAKE_CASE__ ), "category": category, }, } __SCREAMING_SNAKE_CASE :int = out['''context'''].split() __SCREAMING_SNAKE_CASE :int = splitted_context[answer['''end_token''']] __SCREAMING_SNAKE_CASE :Dict = len( tokenizer( ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE__ , ).input_ids ) __SCREAMING_SNAKE_CASE :str = len( tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token __SCREAMING_SNAKE_CASE :int = len(tokenizer(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids ) if num_sub_tokens > 1: answer["end_token"] 
+= num_sub_tokens - 1 __SCREAMING_SNAKE_CASE :int = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive __SCREAMING_SNAKE_CASE :List[str] = answer['''start_token'''] __SCREAMING_SNAKE_CASE :Tuple = answer['''end_token'''] if assertion: __SCREAMING_SNAKE_CASE :Any = tokenizer.decode(SCREAMING_SNAKE_CASE__ ) if answer["span"] != new: print('''ISSUE IN TOKENIZATION''' ) print('''OLD:''' , answer['''span'''] ) print('''NEW:''' , SCREAMING_SNAKE_CASE__ , end='''\n\n''' ) if len(SCREAMING_SNAKE_CASE__ ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } __SCREAMING_SNAKE_CASE :Dict = input_ids[:q_len] __SCREAMING_SNAKE_CASE :Tuple = range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) , max_length - doc_stride ) __SCREAMING_SNAKE_CASE :str = [] __SCREAMING_SNAKE_CASE :Optional[int] = [] __SCREAMING_SNAKE_CASE :Optional[int] = [] __SCREAMING_SNAKE_CASE :List[Any] = [] # null, yes, no, long, short for i in doc_start_indices: __SCREAMING_SNAKE_CASE :int = i + max_length - q_len __SCREAMING_SNAKE_CASE :Union[str, Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: __SCREAMING_SNAKE_CASE :Union[str, Any] = start_token - i + q_len __SCREAMING_SNAKE_CASE :int = end_token - i + q_len answers_category.append(answer['''category'''][0] ) # ["short"] -> "short" else: __SCREAMING_SNAKE_CASE :Optional[Any] = -1_00 __SCREAMING_SNAKE_CASE :Tuple = -1_00 answers_category.append('''null''' ) __SCREAMING_SNAKE_CASE :Tuple = inputs[-1][start_token : end_token + 1] answers_start_token.append(SCREAMING_SNAKE_CASE__ ) answers_end_token.append(SCREAMING_SNAKE_CASE__ ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('''ISSUE in strided for ID:''' , example['''id'''] ) print('''New:''' , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) ) print('''Old:''' , tokenizer.decode(SCREAMING_SNAKE_CASE__ ) , end='''\n\n''' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def __lowerCamelCase ( a_ : Tuple , a_ : List[Any] , a_ : Optional[int]=20_48 , a_ : Optional[int]=40_96 , a_ : Tuple=False ) -> Optional[int]: __SCREAMING_SNAKE_CASE :Optional[int] = get_strided_contexts_and_ans( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , doc_stride=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , assertion=SCREAMING_SNAKE_CASE__ , ) return example def __lowerCamelCase ( a_ : Optional[Any] , a_ : List[str] ) -> Optional[Any]: with jsonlines.open(SCREAMING_SNAKE_CASE__ , '''a''' ) as writer: for example in tqdm(SCREAMING_SNAKE_CASE__ , total=len(SCREAMING_SNAKE_CASE__ ) , desc='''Saving samples ... 
''' ): __SCREAMING_SNAKE_CASE :Optional[int] = example['''labels'''] for ids, start, end, cat in zip( example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { '''input_ids''': ids, '''start_token''': start, '''end_token''': end, '''category''': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer lowerCamelCase_ = load_dataset("natural_questions") lowerCamelCase_ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") lowerCamelCase_ = data["train" if PROCESS_TRAIN == "true" else "validation"] lowerCamelCase_ = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } lowerCamelCase_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) lowerCamelCase_ = data.remove_columns(["annotations", "document", "id", "question"]) print(data) np.random.seed(SEED) lowerCamelCase_ = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
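# Illustration (standalone, with made-up sizes): the doc-stride logic used above to
# split a long tokenized context into overlapping windows of at most `max_length`
# ids, each prefixed with the question tokens. All values here are stand-ins.
q_len, max_length, doc_stride = 10, 50, 20
input_ids = list(range(200))  # stand-in for real token ids, question first
q_indices = input_ids[:q_len]

windows = []
for i in range(q_len, len(input_ids), max_length - doc_stride):
    end_index = i + max_length - q_len
    windows.append(q_indices + input_ids[i:end_index])

# every window keeps the question prefix and holds at most max_length ids total
assert all(len(w) <= max_length for w in windows)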
191
def mf_knapsack(i: int, wt: list, val: list, j: int) -> int:
    """Memory-function (memoized, top-down) 0/1 knapsack using a global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int) -> tuple:
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list) -> tuple:
    """
    Solve the knapsack and also reconstruct one optimal subset of item indices.

    >>> knapsack_with_example_solution(6, [4, 3, 2, 3], [3, 2, 4, 4])
    (8, {3, 4})
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = f"All weights must be integers but got weight of type {type(wt[i])} at index {i}"
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set) -> None:
    # For item i at capacity j to be part of an optimal subset, the optimal value
    # at (i, j) must differ from the optimal value at (i - 1, j), i.e. from the
    # value obtained when considering only the previous items at the same capacity.
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
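# Cross-check (my own brute force, same numbers as the demo above): with w=6,
# wt=[4, 3, 2, 3], val=[3, 2, 4, 4], the best subset is items 3 and 4
# (weights 2 + 3 = 5 <= 6, value 4 + 4 = 8), which agrees with the DP result.
from itertools import combinations

wt, val, w = [4, 3, 2, 3], [3, 2, 4, 4], 6
best = max(
    sum(val[i] for i in c)
    for r in range(len(wt) + 1)
    for c in combinations(range(len(wt)), r)
    if sum(wt[i] for i in c) <= w
)
assert best == 8  # matches the assertion in the __main__ block above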
7
0
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
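# Usage sketch (assumes `diffusers` is installed): this command backs the
# `diffusers-cli env` subcommand, and can also be invoked directly from Python to
# collect the same environment report.
from diffusers.commands.env import EnvironmentCommand

info = EnvironmentCommand().run()  # prints the report and returns it as a dict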
169
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
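# Usage sketch (requires network access to download "albert-base-v2" from the Hub):
# the lowercasing SentencePiece behaviour exercised by the test above, on a plain
# string, outside of the test harness.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("This is a test"))  # ['▁this', '▁is', '▁a', '▁test']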
7
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int] , _snake_case : str , _snake_case : Optional[Any]=7 , _snake_case : List[str]=3 , _snake_case : Optional[Any]=18 , _snake_case : int=30 , _snake_case : List[Any]=400 , _snake_case : str=True , _snake_case : List[str]=None , _snake_case : str=True , _snake_case : Optional[int]=None , _snake_case : List[Any]=True , ): __lowercase : int = size if size is not None else {'''shortest_edge''': 20} __lowercase : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __lowercase : Optional[int] = parent __lowercase : List[str] = batch_size __lowercase : List[str] = num_channels __lowercase : Optional[int] = image_size __lowercase : Tuple = min_resolution __lowercase : Optional[int] = max_resolution __lowercase : Optional[int] = do_resize __lowercase : int = size __lowercase : int = do_center_crop __lowercase : str = crop_size __lowercase : int = do_flip_channel_order def snake_case_ ( self : Optional[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" A__ : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def snake_case_ ( self : Optional[Any] ): __lowercase : str = MobileViTImageProcessingTester(self ) @property def snake_case_ ( self : int ): return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ ( self : Optional[Any] ): __lowercase : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , '''do_resize''' ) ) self.assertTrue(hasattr(lowercase_ , '''size''' ) ) self.assertTrue(hasattr(lowercase_ , '''do_center_crop''' ) ) self.assertTrue(hasattr(lowercase_ , '''center_crop''' ) ) self.assertTrue(hasattr(lowercase_ , '''do_flip_channel_order''' ) ) def snake_case_ ( self : Any ): __lowercase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def snake_case_ ( self : int ): pass def snake_case_ ( self : Union[str, Any] ): __lowercase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image ) # Test not batched input __lowercase : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowercase : Tuple = image_processing(lowercase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case_ ( self : Optional[Any] ): __lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray ) # Test not batched input __lowercase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowercase : Union[str, Any] = image_processing(lowercase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def snake_case_ ( self : List[str] ): __lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor ) # Test not batched input __lowercase : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowercase : Tuple = image_processing(lowercase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
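# Usage sketch (no Hub access needed; the processor is built from kwargs matching
# the tester defaults above): resizing, center-cropping and channel-flipping a PIL
# image into a pixel_values tensor.
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.new("RGB", (32, 24))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])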
156
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
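# Usage sketch (downloads a small model from the Hub): this class is what
# `pipeline("feature-extraction")` constructs under the hood.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Hello world")
# nested Python lists: features[0] holds one hidden-state vector per token
print(len(features[0]), len(features[0][0]))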
7
0
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowerCAmelCase : List[Any] = """true""" def a__ ( snake_case__ , snake_case__=82 , snake_case__=16 ) -> Optional[Any]: set_seed(42 ) lowerCamelCase = RegressionModel() lowerCamelCase = deepcopy(SCREAMING_SNAKE_CASE__ ) lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) lowerCamelCase = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) lowerCamelCase , lowerCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def a__ ( snake_case__ , snake_case__=False ) -> int: lowerCamelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" ) lowerCamelCase = load_dataset("""glue""" , """mrpc""" , split="""validation""" ) def tokenize_function(snake_case__ ): lowerCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): lowerCamelCase = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) lowerCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(snake_case__ ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding="""longest""" , return_tensors="""pt""" ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def a__ ( snake_case__ , snake_case__ ) -> str: lowerCamelCase = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) lowerCamelCase = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE__ ) lowerCamelCase , lowerCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]: lowerCamelCase = [] for batch in dataloader: lowerCamelCase , lowerCamelCase = batch.values() with torch.no_grad(): lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) lowerCamelCase , lowerCamelCase = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowerCamelCase , lowerCamelCase = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) lowerCamelCase , lowerCamelCase = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def a__ ( snake_case__ , snake_case__=82 , snake_case__=False , snake_case__=False , snake_case__=16 ) -> List[Any]: lowerCamelCase , lowerCamelCase , lowerCamelCase = get_basic_setup(SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase , lowerCamelCase = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), F'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def a__ ( snake_case__ = False , snake_case__ = False ) -> str: lowerCamelCase = evaluate.load("""glue""" , """mrpc""" ) lowerCamelCase , lowerCamelCase = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline lowerCamelCase , lowerCamelCase , lowerCamelCase = setup["""no"""] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ ) lowerCamelCase = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch["""labels"""] ) lowerCamelCase = metric.compute() # Then do distributed lowerCamelCase , lowerCamelCase , lowerCamelCase = setup["""ddp"""] model.eval() for batch in dataloader: with torch.inference_mode(): lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ ) lowerCamelCase = outputs.logits.argmax(dim=-1 ) lowerCamelCase = batch["""labels"""] lowerCamelCase , lowerCamelCase = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) lowerCamelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def a__ ( ) -> Optional[Any]: lowerCamelCase = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("""**Testing gather_for_metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test torch metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowerCamelCase = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(F'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test last batch is not dropped when perfectly divisible**""" ) lowerCamelCase = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 5_12 ) accelerator.state._reset_state() def a__ ( snake_case__ ) -> Union[str, Any]: main() if __name__ == "__main__": main()
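# Pattern sketch (my own minimal distillation of the tests above): when evaluating
# under `Accelerator`, gather predictions and references with `gather_for_metrics`
# so the duplicated samples from an uneven last batch are dropped automatically.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.tensor([0, 1])   # stand-ins for this process's model outputs
labels = torch.tensor([0, 1])
preds, labels = accelerator.gather_for_metrics((preds, labels))
# on a single process this is a no-op; across processes it concatenates and trims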
291
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits: each `number &= number - 1` clears the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each bit, one shift at a time."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Compare the two implementations on a few sample values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
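# Worked trace of Brian Kernighan's loop for number = 25 (0b11001): each
# `number &= number - 1` clears the lowest set bit, so the loop runs once per bit.
#   25 (0b11001) & 24 (0b11000) -> 24
#   24 (0b11000) & 23 (0b10111) -> 16
#   16 (0b10000) & 15 (0b01111) ->  0
# Three iterations, so the count is 3 -- matching bin(25).count("1").
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == bin(25).count("1") == 3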
7
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: A =None A =logging.get_logger(__name__) A ={'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} A ={ 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } A ={ 'facebook/nllb-large-en-ro': 10_24, 'facebook/nllb-200-distilled-600M': 10_24, } # fmt: off A =['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _a ( _UpperCAmelCase ): __a : List[str] = VOCAB_FILES_NAMES __a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : List[str] = PRETRAINED_VOCAB_FILES_MAP __a : Optional[Any] = 
["""input_ids""", """attention_mask"""] __a : Optional[int] = NllbTokenizer __a : Dict = [] __a : str = [] def __init__( self : Optional[Any] , lowercase : List[str]=None , lowercase : Union[str, Any]=None , lowercase : List[str]="<s>" , lowercase : Tuple="</s>" , lowercase : List[Any]="</s>" , lowercase : Dict="<s>" , lowercase : Union[str, Any]="<unk>" , lowercase : Union[str, Any]="<pad>" , lowercase : int="<mask>" , lowercase : Tuple=None , lowercase : Union[str, Any]=None , lowercase : Optional[Any]=None , lowercase : int=False , **lowercase : List[str] , ): '''simple docstring''' UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token UpperCAmelCase = legacy_behaviour super().__init__( vocab_file=lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , legacy_behaviour=lowercase_ , **lowercase_ , ) UpperCAmelCase = vocab_file UpperCAmelCase = False if not self.vocab_file else True UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCAmelCase = { lang_code: self.convert_tokens_to_ids(lowercase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase = src_lang if src_lang is not None else '''eng_Latn''' UpperCAmelCase = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def A ( self : Union[str, Any] ): '''simple docstring''' return self._src_lang @src_lang.setter def A ( self : List[Any] , lowercase : str ): '''simple docstring''' UpperCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self : Any , lowercase : List[int] , lowercase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : str , lowercase : List[int] , lowercase : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A ( self : Optional[int] , lowercase : Dict , lowercase : str , lowercase : Optional[str] , lowercase : Optional[str] , **lowercase : Tuple ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase = src_lang UpperCAmelCase = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ ) UpperCAmelCase = self.convert_tokens_to_ids(lowercase_ ) UpperCAmelCase = tgt_lang_id return inputs def A ( self : Dict , lowercase : List[str] , lowercase : str = "eng_Latn" , lowercase : Optional[List[str]] = None , lowercase : str = "fra_Latn" , **lowercase : Tuple , ): '''simple docstring''' 
UpperCAmelCase = src_lang UpperCAmelCase = tgt_lang return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ ) def A ( self : Dict ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def A ( self : Optional[int] ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : str , lowercase : Tuple ): '''simple docstring''' UpperCAmelCase = self.convert_tokens_to_ids(lowercase_ ) if self.legacy_behaviour: UpperCAmelCase = [] UpperCAmelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase = [self.cur_lang_code] UpperCAmelCase = [self.eos_token_id] UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : Optional[Any] , lowercase : str ): '''simple docstring''' UpperCAmelCase = self.convert_tokens_to_ids(lowercase_ ) if self.legacy_behaviour: UpperCAmelCase = [] UpperCAmelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase = [self.cur_lang_code] UpperCAmelCase = [self.eos_token_id] UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def A ( self : Dict , lowercase : str , lowercase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowercase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory." ) return UpperCAmelCase = os.path.join( lowercase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ): copyfile(self.vocab_file , lowercase_ ) return (out_vocab_file,)
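# A short usage sketch for the fast NLLB tokenizer defined above. The class
# name `NllbTokenizerFast` is an assumption inferred from the slow-tokenizer
# import at the top of the file; the checkpoint id comes from the vocab maps
# above, and the example sentence is illustrative.
from transformers import NllbTokenizerFast

tok = NllbTokenizerFast.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
enc = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# With the default (non-legacy) behaviour configured by
# set_src_lang_special_tokens, input_ids start with the source language code
# and end with </s>.
print(tok.convert_ids_to_tokens(enc["input_ids"][0])[:2])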
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' A__ = 384 A__ = 7 if "tiny" in model_name: A__ = 96 A__ = (2, 2, 6, 2) A__ = (3, 6, 12, 24) elif "small" in model_name: A__ = 96 A__ = (2, 2, 18, 2) A__ = (3, 6, 12, 24) elif "base" in model_name: A__ = 128 A__ = (2, 2, 18, 2) A__ = (4, 8, 16, 32) A__ = 12 A__ = 512 elif "large" in model_name: A__ = 192 A__ = (2, 2, 18, 2) A__ = (6, 12, 24, 48) A__ = 12 A__ = 768 # set label information A__ = 150 A__ = 'huggingface/label-files' A__ = 'ade20k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = {v: k for k, v in idalabel.items()} A__ = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) A__ = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , ) return config def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: '''simple docstring''' A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 ) A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 ) A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(4 , in_channel // 4 ) A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(in_channel // 4 , 4 ) A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, 
Any]: '''simple docstring''' A__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } A__ = model_name_to_url[model_name] A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ ) A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "bn" in key: A__ = key.replace('bn' , 'batch_norm' ) A__ = val # rename keys A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ ) if "norm" in key: A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify on image A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' ) A__ = SegformerImageProcessor() A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
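# Why the "fix downsample parameters" step above exists, as I read the script:
# mmsegmentation's Swin stores the patch-merging ("downsample") weights with
# its four 2x2 patch slices in a different order than HF Swin expects, so the
# conversion permutes channel groups with the index pattern [0, 2, 1, 3].
# A self-contained toy demonstration (the helper name is illustrative):
import torch


def reorder_reduction_weight(x: torch.Tensor) -> torch.Tensor:
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    return x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)


toy = torch.arange(8.0).unsqueeze(0)  # shape (1, 8): four groups of two channels
print(reorder_reduction_weight(toy))  # tensor([[0., 4., 2., 6., 1., 5., 3., 7.]])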
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ = logging.get_logger(__name__) def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] , __UpperCAmelCase: Dict=False , __UpperCAmelCase: Optional[Any]=False ) -> str: UpperCamelCase__ : Optional[int] = '''backbone.''' if is_semantic else '''''' UpperCamelCase__ : Optional[Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append( (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ (f"{prefix}cls_token", '''beit.embeddings.cls_token'''), (f"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''), (f"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''), (f"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('''mask_token''', '''beit.embeddings.mask_token'''), ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) else: # layernorm + classification head rename_keys.extend( [ ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''), ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: Dict=False , __UpperCAmelCase: int=False ) -> List[str]: for i in range(config.num_hidden_layers ): UpperCamelCase__ : List[Any] = '''backbone.''' if is_semantic else '''''' # queries, keys and values UpperCamelCase__ : Tuple = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" ) UpperCamelCase__ : Any = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" ) UpperCamelCase__ : Any = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" ) UpperCamelCase__ : Optional[Any] = in_proj_weight[ : config.hidden_size, : ] UpperCamelCase__ : Union[str, Any] = q_bias UpperCamelCase__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] UpperCamelCase__ : Any = 
in_proj_weight[ -config.hidden_size :, : ] UpperCamelCase__ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained UpperCamelCase__ : Dict = state_dict.pop(f"{prefix}blocks.{i}.gamma_1" ) UpperCamelCase__ : int = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" ) UpperCamelCase__ : List[Any] = gamma_a UpperCamelCase__ : List[Any] = gamma_a def lowerCAmelCase_ ( __UpperCAmelCase: int , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: List[Any] ) -> Dict: UpperCamelCase__ : Any = dct.pop(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Dict = val def lowerCAmelCase_ ( ) -> str: UpperCamelCase__ : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' UpperCamelCase__ : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: Union[str, Any] , __UpperCAmelCase: List[str]=False ) -> str: UpperCamelCase__ : Optional[int] = False if '''rvlcdip''' in checkpoint_url else True UpperCamelCase__ : List[str] = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: UpperCamelCase__ : List[Any] = 1024 UpperCamelCase__ : Dict = 4096 UpperCamelCase__ : Union[str, Any] = 24 UpperCamelCase__ : Optional[int] = 16 # labels if "rvlcdip" in checkpoint_url: UpperCamelCase__ : Union[str, Any] = 16 UpperCamelCase__ : Tuple = '''huggingface/label-files''' UpperCamelCase__ : Optional[Any] = '''rvlcdip-id2label.json''' UpperCamelCase__ : Optional[int] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase__ : List[str] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} UpperCamelCase__ : Optional[int] = idalabel UpperCamelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys UpperCamelCase__ : Optional[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )['''model'''] UpperCamelCase__ : str = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ ) # load HuggingFace model UpperCamelCase__ : str = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # Check outputs on an image UpperCamelCase__ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Any = prepare_img() UpperCamelCase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) UpperCamelCase__ : Optional[int] = encoding['''pixel_values'''] UpperCamelCase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ) UpperCamelCase__ : Union[str, Any] = outputs.logits # verify logits UpperCamelCase__ : str = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected" Path(SCREAMING_SNAKE_CASE__ 
).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: if has_lm_head: UpperCamelCase__ : Tuple = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large''' else: UpperCamelCase__ : List[Any] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip''' image_processor.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE__ , ) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE__ , ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_url', default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth', type=str, help='URL to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', ) UpperCAmelCase_ = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
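# The file above is a command-line converter; a typical invocation would look
# like the following (the script filename and output path are illustrative,
# the URL is the script's own default):
#
#   python convert_dit_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base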
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowercase_ = "true" def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) A__ = RegressionModel() A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) A__ = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ): A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): A__ = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) A__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str: '''simple docstring''' A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) A__ = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' A__ = [] for batch in dataloader: A__ , A__ = batch.values() with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A__ , A__ = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]: 
'''simple docstring''' A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str: '''simple docstring''' A__ = evaluate.load('glue' , 'mrpc' ) A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline A__ , A__ , A__ = setup['no'] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] ) A__ = metric.compute() # Then do distributed A__ , A__ , A__ = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) A__ = batch['labels'] A__ , A__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) A__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _snake_case( ) -> Optional[Any]: '''simple docstring''' A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) A__ = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 ) accelerator.state._reset_state() def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' main() if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available a__ : List[Any] = { '''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''], '''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Tuple = [ '''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AdaptiveEmbedding''', '''TransfoXLForSequenceClassification''', '''TransfoXLLMHeadModel''', '''TransfoXLModel''', '''TransfoXLPreTrainedModel''', '''load_tf_weights_in_transfo_xl''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ '''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFAdaptiveEmbedding''', '''TFTransfoXLForSequenceClassification''', '''TFTransfoXLLMHeadModel''', '''TFTransfoXLMainLayer''', '''TFTransfoXLModel''', '''TFTransfoXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys a__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection` and return its index,
    or None when absent. The probe position is interpolated from the values at
    the current bounds instead of always bisecting the middle."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of `interpolation_search`; `left` and `right` are the
    current search bounds."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError when `collection` is not sorted in ascending order."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
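# A minimal usage sketch for the two search variants above (runnable when
# appended to this module):
if __name__ == "__main__":
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(data, 45) == 3
    assert interpolation_search(data, 99) is None
    assert interpolation_search_by_recursion(data, 77, 0, len(data) - 1) == 6
    print("interpolation search checks passed")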
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __snake_case ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = StableDiffusionPanoramaPipeline _lowerCamelCase = TEXT_TO_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__( self ): '''simple docstring''' torch.manual_seed(0 ) __A : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __A : List[Any] = DDIMScheduler() torch.manual_seed(0 ) __A : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __A : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __A : List[Any] = CLIPTextModel(lowercase_ ) __A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __A : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase=0 ): '''simple docstring''' __A : Optional[int] = torch.manual_seed(lowercase_ ) __A : int = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. 
'''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __A : str = self.get_dummy_components() __A : List[Any] = StableDiffusionPanoramaPipeline(**lowercase_ ) __A : Dict = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) __A : Optional[Any] = self.get_dummy_inputs(lowercase_ ) __A : Optional[int] = sd_pipe(**lowercase_ ).images __A : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A : int = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__( self ): '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__( self ): '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 ) def UpperCamelCase__( self ): '''simple docstring''' __A : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __A : str = self.get_dummy_components() __A : Optional[int] = StableDiffusionPanoramaPipeline(**lowercase_ ) __A : Tuple = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) __A : Dict = self.get_dummy_inputs(lowercase_ ) __A : Tuple = '''french fries''' __A : Any = sd_pipe(**lowercase_ , negative_prompt=lowercase_ ) __A : Tuple = output.images __A : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A : Dict = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__( self ): '''simple docstring''' __A : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __A : List[str] = self.get_dummy_components() __A : Tuple = StableDiffusionPanoramaPipeline(**lowercase_ ) __A : Optional[int] = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) __A : Optional[int] = self.get_dummy_inputs(lowercase_ ) __A : Union[str, Any] = sd_pipe(**lowercase_ , view_batch_size=2 ) __A : Tuple = output.images __A : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A : Tuple = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__( self ): '''simple docstring''' __A : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __A : Optional[Any] = self.get_dummy_components() __A : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' ) __A : Optional[Any] = StableDiffusionPanoramaPipeline(**lowercase_ ) __A : Optional[Any] = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) __A : List[Any] = self.get_dummy_inputs(lowercase_ ) __A : List[str] = sd_pipe(**lowercase_ ).images __A : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A : Optional[int] = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCamelCase__( 
self ): '''simple docstring''' __A : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator __A : List[Any] = self.get_dummy_components() __A : Optional[int] = PNDMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , skip_prk_steps=lowercase_ ) __A : str = StableDiffusionPanoramaPipeline(**lowercase_ ) __A : List[Any] = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) __A : List[Any] = self.get_dummy_inputs(lowercase_ ) __A : Dict = sd_pipe(**lowercase_ ).images __A : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __A : List[Any] = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __snake_case ( unittest.TestCase ): """simple docstring""" def UpperCamelCase__( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__( self , __lowerCamelCase=0 ): '''simple docstring''' __A : Union[str, Any] = torch.manual_seed(lowercase_ ) __A : Optional[int] = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__( self ): '''simple docstring''' __A : Tuple = '''stabilityai/stable-diffusion-2-base''' __A : Optional[int] = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) __A : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() __A : Optional[int] = self.get_inputs() __A : Tuple = pipe(**lowercase_ ).images __A : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __A : Dict = np.array( [ 0.3_6_9_6_8_3_9_2, 0.2_7_0_2_5_3_7_2, 0.3_2_4_4_6_7_6_6, 0.2_8_3_7_9_3_8_7, 0.3_6_3_6_3_2_7_4, 0.3_0_7_3_3_3_4_7, 0.2_7_1_0_0_0_2_7, 0.2_7_0_5_4_1_2_5, 0.2_5_5_3_6_0_9_6, ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-2 def UpperCamelCase__( self ): '''simple docstring''' __A : Tuple = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=lowercase_ ) __A : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() __A : Dict = self.get_inputs() __A : int = pipe(**lowercase_ ).images __A : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) __A : Tuple = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase__( self ): '''simple docstring''' __A : Union[str, Any] = 0 def callback_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> None: __A : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __A : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __A : Union[str, Any] = latents[0, -3:, -3:, -1] __A : List[str] = np.array( [ 0.1_8_6_8_1_8_6_9, 0.3_3_9_0_7_8_1_6, 0.5_3_6_1_2_7_6, 0.1_4_4_3_2_8_6_5, -0.0_2_8_5_6_6_1_1, -0.7_3_9_4_1_1_2_3, 0.2_3_3_9_7_9_8_7, 0.4_7_3_2_2_6_8_2, -0.3_7_8_2_3_1_6_4, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 
elif step == 2: __A : Dict = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) __A : Optional[Any] = latents[0, -3:, -3:, -1] __A : List[Any] = np.array( [ 0.1_8_5_3_9_6_4_5, 0.3_3_9_8_7_2_4_8, 0.5_3_7_8_5_5_9, 0.1_4_4_3_7_1_4_2, -0.0_2_4_5_5_2_6_1, -0.7_3_3_8_3_1_7, 0.2_3_9_9_0_7_5_5, 0.4_7_3_5_6_2_7_2, -0.3_7_8_6_5_0_5, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 __A : int = False __A : Optional[int] = '''stabilityai/stable-diffusion-2-base''' __A : Tuple = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) __A : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) __A : List[str] = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing() __A : Union[str, Any] = self.get_inputs() pipe(**lowercase_ , callback=lowercase_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCamelCase__( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __A : Tuple = '''stabilityai/stable-diffusion-2-base''' __A : Optional[Any] = DDIMScheduler.from_pretrained(lowercase_ , subfolder='''scheduler''' ) __A : Union[str, Any] = StableDiffusionPanoramaPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ ) __A : Dict = pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __A : int = self.get_inputs() __A : Dict = pipe(**lowercase_ ) __A : Optional[Any] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
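# A minimal end-to-end sketch mirroring the slow tests above (same checkpoint,
# scheduler, prompt and guidance scale as the tests use; downloads weights and
# assumes a CUDA device is available):
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_id, scheduler=scheduler, safety_checker=None
)
pipe = pipe.to("cuda")
image = pipe("a photo of the dolomites", guidance_scale=7.5).images[0]
image.save("panorama.png")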
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Pair up the flag/value tokens left over by argparse into a kwargs dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
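# A minimal sketch of driving the entry point above programmatically; this is
# equivalent to running `datasets-cli env` in a shell ("env" is one of the
# subcommands registered in main()):
import sys
from unittest import mock

with mock.patch.object(sys, "argv", ["datasets-cli", "env"]):
    main()  # should print information about the local `datasets` environment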
import sys


def matrix_chain_order(array):
    """Bottom-up dynamic program for matrix-chain multiplication.
    `array` holds the chain's dimensions; matrix i has shape
    array[i - 1] x array[i]. Returns the cost table and the split table."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization recorded in the split table."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
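# A worked example for the dynamic program above, using the classic CLRS
# chain: A1..A6 with dimensions 30x35, 35x15, 15x5, 5x10, 10x20 and 20x25.
if __name__ == "__main__":
    dims = [30, 35, 15, 5, 10, 20, 25]
    matrix, sol = matrix_chain_order(dims)
    print(matrix[1][len(dims) - 1])  # 15125 scalar multiplications
    print_optimal_solution(sol, 1, len(dims) - 1)  # ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )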
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A : """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' return ViTConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,) def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]: '''simple docstring''' A__ = TFViTModel(config=lowercase_ ) A__ = model(lowercase_,training=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) A__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) ) def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFViTForImageClassification(lowercase_ ) A__ = model(lowercase_,labels=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = TFViTForImageClassification(lowercase_ ) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCamelCase = ( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' A__ = TFViTModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Any )-> int: '''simple docstring''' pass def snake_case__ ( self : str )-> Dict: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) ) def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1],lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: 
'''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(lowercase_ ) def _snake_case( ) -> str: '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase_,return_tensors='tf' ) # forward pass A__ = model(**lowercase_ ) # verify the logits A__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape,lowercase_ ) A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
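# --- Editor's sketch (not part of the original test file): the
# `interpolate_pos_encoding` checks above hinge on ViT's token count being
# (image_size // patch_size) ** 2 + 1, the +1 being the [CLS] token. A
# standalone check of that arithmetic, using the tester's sizes (image 30, patch 2):

def vit_seq_length(image_size: int, patch_size: int) -> int:
    """Number of tokens ViT produces for a square image: patches + [CLS]."""
    return (image_size // patch_size) ** 2 + 1

assert vit_seq_length(30, 2) == 226  # full-size image used by the model tester
assert vit_seq_length(15, 2) == 50   # half-size image fed with interpolate_pos_encoding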
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token states, weighted by the attention mask, then
        # project the pooled text embedding into the image-embedding space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
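# --- Editor's sketch (hypothetical smoke test, not part of the original file): how
# the wrapper above fits together. The tiny config values are arbitrary; the one
# real constraint is that `transformerDimSize` equals the backbone `hidden_size`,
# because that is what feeds `LinearTransformation`.
config = MCLIPConfig(
    transformerDimSize=32, imageDimSize=16,
    vocab_size=100, hidden_size=32, num_hidden_layers=1,
    num_attention_heads=2, intermediate_size=37,
)
model = MultilingualCLIP(config)
input_ids = torch.randint(0, 100, (2, 7))
attention_mask = torch.ones_like(input_ids)
projected, token_embs = model(input_ids, attention_mask)
assert projected.shape == (2, 16)      # pooled text embedding in image space
assert token_embs.shape == (2, 7, 32)  # per-token transformer states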
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class A : """simple docstring""" def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = vocab_size - 1 def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) A__ = self.get_config() return config, input_ids, input_mask, token_labels def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,) def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = True return config, input_ids, input_mask, token_labels def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any: '''simple docstring''' A__ = GPTNeoXModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple: '''simple 
docstring''' A__ = True A__ = GPTNeoXModel(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]: '''simple docstring''' A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForQuestionAnswering(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForTokenClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = True A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() # first forward pass A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3),config.vocab_size ) A__ = ids_tensor((self.batch_size, 3),vocab_size=2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens],dim=-1 ) A__ = torch.cat([input_mask, next_mask],dim=-1 ) A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ ) A__ = output_from_no_past['hidden_states'][0] A__ = model( lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0] # select random slice A__ = ids_tensor((1,),output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) ) def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = 
config_and_inputs A__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCamelCase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = GPTNeoXModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 ) def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : List[str] )-> Any: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowercase_ ) def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) def snake_case__ ( self : Any )-> List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 1_0],config.vocab_size ) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = 
GPTNeoXModel(lowercase_ ) original_model.to(lowercase_ ) original_model.eval() A__ = original_model(lowercase_ ).last_hidden_state A__ = original_model(lowercase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = {'type': scaling_type, 'factor': 10.0} A__ = GPTNeoXModel(lowercase_ ) scaled_model.to(lowercase_ ) scaled_model.eval() A__ = scaled_model(lowercase_ ).last_hidden_state A__ = scaled_model(lowercase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) else: self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) @require_torch class A ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Tuple )-> Union[str, Any]: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowercase_ ) A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 ) A__ = tokenizer.batch_decode(lowercase_ )[0] self.assertEqual(lowercase_,lowercase_ )
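# --- Editor's sketch (an illustration of the "linear" RoPE scaling the
# parameterized test above exercises; not the library code). Linear scaling
# divides the position index by `factor` before computing the rotary angles,
# which stretches the usable context window.
import torch

def rope_angles(positions: torch.Tensor, dim: int = 8, base: float = 10_000.0,
                factor: float = 1.0) -> torch.Tensor:
    """Rotary embedding angles; `factor > 1` applies linear position scaling."""
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)

pos = torch.arange(10)
plain = rope_angles(pos)
scaled = rope_angles(pos, factor=10.0)
# With linear scaling, position 10*k looks like position k did before scaling.
assert torch.allclose(scaled * 10.0, plain)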
def solution(min_total: int = 10**12) -> int:
    """
    Iterate the Pell-type recurrence (solutions of x^2 - 2*y^2 = -1) until the
    numerator side first exceeds 2 * min_total - 1, then map it back to a count.
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
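# --- Editor's sketch (not part of the original solution): the recurrence above
# generates solutions of the Pell equation x^2 - 2*y^2 = -1. Mapping each pair
# back to disc counts (blue = (y + 1) // 2, total = (x + 1) // 2) shows every
# iterate really gives P(two blue in a row) = 1/2, the "arranged probability" setup.
from fractions import Fraction

prev_x, x = 1, 1          # "numerator" side: x = 2 * total - 1
prev_y, y = 0, 1          # "denominator" side: y = 2 * blue - 1
for _ in range(4):
    prev_x += 2 * x
    x += 2 * prev_x
    prev_y += 2 * y
    y += 2 * prev_y
    blue, total = (y + 1) // 2, (x + 1) // 2
    assert Fraction(blue, total) * Fraction(blue - 1, total - 1) == Fraction(1, 2)
    print(blue, total)    # (3, 4), (15, 21), (85, 120), (493, 697)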
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Accept the historical misspelling of this kwarg as a fallback.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
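# --- Editor's sketch (hypothetical usage, not part of the original file; class name
# as reconstructed above): what `_rope_scaling_validation` accepts and rejects.
config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted

try:
    OpenLlamaConfig(rope_scaling={"type": "xpos", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']

try:
    OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 1})
except ValueError as err:
    print(err)  # factor must be a float > 1, so the int 1 is rejected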
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCAmelCase_ = _symbol_database.Default() lowerCAmelCase_ = _descriptor_pool.Default().AddSerializedFile( b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) lowerCAmelCase_ = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCAmelCase_ = None lowerCAmelCase_ = b'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCAmelCase_ = 45 lowerCAmelCase_ = 15_81 lowerCAmelCase_ = 15_17 lowerCAmelCase_ = 15_70 lowerCAmelCase_ = 15_84 lowerCAmelCase_ = 17_93 lowerCAmelCase_ = 17_95 lowerCAmelCase_ = 19_16 lowerCAmelCase_ = 18_64 lowerCAmelCase_ = 19_05 lowerCAmelCase_ = 19_19 lowerCAmelCase_ = 24_29 lowerCAmelCase_ = 22_08 lowerCAmelCase_ = 24_18 lowerCAmelCase_ = 23_23 lowerCAmelCase_ = 24_07 # @@protoc_insertion_point(module_scope)
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky series."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds roughly 14 correct digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]  # drop the last, uncertain digit


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
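# --- Editor's sketch (not part of the original file): a quick sanity check of the
# Chudnovsky implementation against math.pi. With precision=10 only the k=0 term
# contributes (ceil(10 / 14) == 1), yet the series already agrees to 9 kept digits.
from math import pi as float_pi

assert pi(10) == "3.14159265"
assert abs(float(pi(10)) - float_pi) < 1e-8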
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate lowerCAmelCase_ = trt.Logger(trt.Logger.WARNING) lowerCAmelCase_ = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) lowerCAmelCase_ = logging.getLogger(__name__) lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=3_84, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=1_28, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) lowerCAmelCase_ = parser.parse_args() if args.tokenizer_name: lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) lowerCAmelCase_ = args.per_device_eval_batch_size lowerCAmelCase_ = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties lowerCAmelCase_ = True lowerCAmelCase_ = '''temp_engine/bert-fp32.engine''' if args.fpaa: lowerCAmelCase_ = '''temp_engine/bert-fp16.engine''' if args.inta: lowerCAmelCase_ = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') lowerCAmelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network lowerCAmelCase_ = [network.get_input(i) for i in range(network.num_inputs)] lowerCAmelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: lowerCAmelCase_ = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) lowerCAmelCase_ = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) lowerCAmelCase_ = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) snake_case_ = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) snake_case_ = np.asarray(inputs['''token_type_ids'''] , 
dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , SCREAMING_SNAKE_CASE__ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , SCREAMING_SNAKE_CASE__ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , SCREAMING_SNAKE_CASE__ ) # start time snake_case_ = time.time() # Run inference context.execute_async( bindings=[int(SCREAMING_SNAKE_CASE__ ) for d_inp in d_inputs] + [int(SCREAMING_SNAKE_CASE__ ), int(SCREAMING_SNAKE_CASE__ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) cuda.memcpy_dtoh_async(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Synchronize the stream and take time stream.synchronize() # end time snake_case_ = time.time() snake_case_ = end_time - start_time snake_case_ = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. lowerCAmelCase_ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCAmelCase_ = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. lowerCAmelCase_ = raw_datasets['''validation'''].column_names lowerCAmelCase_ = '''question''' if '''question''' in column_names else column_names[0] lowerCAmelCase_ = '''context''' if '''context''' in column_names else column_names[1] lowerCAmelCase_ = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). lowerCAmelCase_ = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.""" ) lowerCAmelCase_ = min(args.max_seq_length, tokenizer.model_max_length) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace snake_case_ = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. snake_case_ = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=SCREAMING_SNAKE_CASE__ , stride=args.doc_stride , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. snake_case_ = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. snake_case_ = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). snake_case_ = tokenized_examples.sequence_ids(SCREAMING_SNAKE_CASE__ ) snake_case_ = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. snake_case_ = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. snake_case_ = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples lowerCAmelCase_ = raw_datasets['''validation'''] # Validation Feature Creation lowerCAmelCase_ = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) lowerCAmelCase_ = default_data_collator lowerCAmelCase_ = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) lowerCAmelCase_ = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="eval" ): # Post-processing: we match the start logits and end logits to answers in the original context. 
snake_case_ = postprocess_qa_predictions( examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: snake_case_ = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: snake_case_ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] snake_case_ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase_ = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__ ) ) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__ ).itemsize # Allocate device memory for inputs and outputs. lowerCAmelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer lowerCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) lowerCAmelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) lowerCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes) lowerCAmelCase_ = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
lowerCAmelCase_ = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f""" Num examples = {len(eval_dataset)}""") logger.info(f""" Batch size = {args.per_device_eval_batch_size}""") lowerCAmelCase_ = 0.0 lowerCAmelCase_ = 0 lowerCAmelCase_ = timeit.default_timer() lowerCAmelCase_ = None for step, batch in enumerate(eval_dataloader): lowerCAmelCase_ , lowerCAmelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 lowerCAmelCase_ , lowerCAmelCase_ = outputs lowerCAmelCase_ = torch.tensor(start_logits) lowerCAmelCase_ = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered lowerCAmelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00) lowerCAmelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00) lowerCAmelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) lowerCAmelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00) if all_preds is not None: lowerCAmelCase_ = nested_truncate(all_preds, len(eval_dataset)) lowerCAmelCase_ = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00)) logger.info('''Total Number of Inference = %d''', niter) lowerCAmelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds) lowerCAmelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"""Evaluation metrics: {eval_metric}""")
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """Reads a Hugging Face dataset out of a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
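# --- Editor's sketch (hypothetical usage, not part of the original module): how the
# reader above is typically reached through the public `Dataset.from_spark` API.
# Requires a running local Spark session; the sample schema and data are made up.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[1]").appName("demo").getOrCreate()
df = spark.createDataFrame([("a", 1), ("b", 2)], schema="text string, label int")

ds = Dataset.from_spark(df)       # materializes the DataFrame as an Arrow dataset
print(ds.column_names, len(ds))   # ['text', 'label'] 2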
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = (DPMSolverSDEScheduler,) SCREAMING_SNAKE_CASE : str = 10 def snake_case__( self : Optional[int] , **_UpperCamelCase : str ) ->Optional[Any]: snake_case_ = { '''num_train_timesteps''': 1_1_0_0, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**_UpperCamelCase ) return config def snake_case__( self : Optional[Any] ) ->Any: for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=_UpperCamelCase ) def snake_case__( self : List[str] ) ->int: for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCamelCase , beta_end=_UpperCamelCase ) def snake_case__( self : List[str] ) ->Union[str, Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_UpperCamelCase ) def snake_case__( self : List[Any] ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCamelCase ) def snake_case__( self : Optional[int] ) ->int: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ = sample.to(_UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) snake_case_ = model(_UpperCamelCase , _UpperCamelCase ) snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = output.prev_sample snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def snake_case__( self : Optional[Any] ) ->str: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config(prediction_type='''v_prediction''' ) snake_case_ = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ = sample.to(_UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) snake_case_ = model(_UpperCamelCase , _UpperCamelCase ) snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = output.prev_sample snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert 
abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def snake_case__( self : List[str] ) ->Union[str, Any]: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**_UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase ) snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) snake_case_ = model(_UpperCamelCase , _UpperCamelCase ) snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = output.prev_sample snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def snake_case__( self : Optional[Any] ) ->int: snake_case_ = self.scheduler_classes[0] snake_case_ = self.get_scheduler_config() snake_case_ = scheduler_class(**_UpperCamelCase , use_karras_sigmas=_UpperCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_UpperCamelCase ) snake_case_ = self.dummy_model() snake_case_ = self.dummy_sample_deter.to(_UpperCamelCase ) * scheduler.init_noise_sigma snake_case_ = sample.to(_UpperCamelCase ) for t in scheduler.timesteps: snake_case_ = scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase ) snake_case_ = model(_UpperCamelCase , _UpperCamelCase ) snake_case_ = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = output.prev_sample snake_case_ = torch.sum(torch.abs(_UpperCamelCase ) ) snake_case_ = torch.mean(torch.abs(_UpperCamelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
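# --- Editor's sketch (not part of the original tests): the scheduler contract all
# of the checks above exercise, written as a minimal loop with a stand-in "model".
# `DPMSolverSDEScheduler` needs the `torchsde` package, as the decorator notes.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)   # stand-in for a real UNet call
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])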
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
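# --- Editor's sketch (hypothetical usage, not part of the original __init__): the
# lazy-module pattern above means submodules are only imported on first attribute
# access, so the line below pays the heavy torch import cost only when it runs.
from transformers.models.dpt import DPTConfig   # resolved lazily via _LazyModule

config = DPTConfig()                            # defaults; no checkpoint download
print(config.model_type)                        # "dpt"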
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Probability density of the normal distribution N(mu, sigma^2) at x.

    >>> round(float(gaussian(0)), 10)
    0.3989422804
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
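# --- Editor's sketch (not part of the original file): numerical check that the
# density above integrates to ~1 and matches the peak height 1 / (sigma * sqrt(2 * pi)).
import numpy as np

xs = np.linspace(-8.0, 8.0, 10_001)
area = np.trapz(gaussian(xs), xs)                       # vectorized over the grid
assert abs(area - 1.0) < 1e-6
assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12    # mu=0, sigma=1 peak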
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the serialized state disagrees with
        # the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
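# --- Editor's sketch (not part of the original file): the two pair-handling
# helpers above are pure list arithmetic, so their output can be checked without
# loading a vocab. Using the conventional BERT ids cls=101, sep=102 as stand-ins:
cls_id, sep_id = 101, 102
token_ids_0, token_ids_1 = [7, 8], [9]

inputs = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
type_ids = len([cls_id] + token_ids_0 + [sep_id]) * [0] + len(token_ids_1 + [sep_id]) * [1]
assert inputs == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]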
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = BertJapaneseTokenizer SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : str = True def snake_case__( self : str ) ->Tuple: super().setUp() snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = '''こんにちは、世界。 \nこんばんは、世界。''' snake_case_ = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict ) ->Tuple: snake_case_, snake_case_ = self.get_input_output_texts(_UpperCamelCase ) snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return text, ids def snake_case__( self : Any ) ->Dict: pass # TODO add if relevant def snake_case__( self : Optional[Any] ) ->Optional[Any]: pass # TODO add if relevant def snake_case__( self : Optional[Any] ) ->Any: pass # TODO add if relevant def snake_case__( self : Optional[int] ) ->int: snake_case_ = self.tokenizer_class(self.vocab_file ) snake_case_ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) def snake_case__( self : Dict ) ->Any: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : List[Any] ) ->Tuple: snake_case_ = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', 
'''た''', '''。'''] , ) def snake_case__( self : int ) ->List[Any]: try: snake_case_ = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : Union[str, Any] ) ->str: try: snake_case_ = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : List[str] ) ->Dict: snake_case_ = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : Optional[int] ) ->List[str]: try: snake_case_ = MecabTokenizer( do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_ = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def snake_case__( self : Optional[Any] ) ->str: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_sudachi def snake_case__( self : Tuple ) ->Optional[int]: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : str ) ->Tuple: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def snake_case__( self : Dict ) ->List[Any]: snake_case_ = 
SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def snake_case__( self : Optional[int] ) ->Tuple: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def snake_case__( self : Optional[Any] ) ->int: snake_case_ = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : Dict ) ->List[str]: snake_case_ = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : List[str] ) ->List[Any]: snake_case_ = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_jumanpp def snake_case__( self : List[str] ) ->Dict: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : Any ) ->Any: snake_case_ = JumanppTokenizer(do_lower_case=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Dict: snake_case_ = JumanppTokenizer(normalize_text=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', 
'''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Optional[Any]: snake_case_ = JumanppTokenizer(trim_whitespace=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def snake_case__( self : Any ) ->Optional[int]: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] snake_case_ = {} for i, token in enumerate(_UpperCamelCase ): snake_case_ = i snake_case_ = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def snake_case__( self : Optional[Any] ) ->Optional[int]: snake_case_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) snake_case_ = tokenizer.subword_tokenizer snake_case_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(_UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) snake_case_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(_UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def snake_case__( self : str ) ->Tuple: snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = BertJapaneseTokenizer SCREAMING_SNAKE_CASE : int = False def snake_case__( self : List[str] ) ->int: super().setUp() snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def snake_case__( self : Optional[Any] , **_UpperCamelCase : Union[str, Any] ) ->int: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : 
Union[str, Any] ) ->List[Any]: snake_case_ = '''こんにちは、世界。 \nこんばんは、世界。''' snake_case_ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def snake_case__( self : Dict ) ->Union[str, Any]: pass # TODO add if relevant def snake_case__( self : Any ) ->Union[str, Any]: pass # TODO add if relevant def snake_case__( self : Tuple ) ->Tuple: pass # TODO add if relevant def snake_case__( self : List[Any] ) ->int: snake_case_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) snake_case_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( _UpperCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] snake_case_ = {} for i, token in enumerate(_UpperCamelCase ): snake_case_ = i snake_case_ = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def snake_case__( self : Dict ) ->Tuple: snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : str ) ->int: snake_case_ = '''cl-tohoku/bert-base-japanese''' snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) snake_case_ = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
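# Hedged usage sketch (not part of the test suite above) showing how the
# tokenizer these tests exercise is typically used. Assumes network access to
# the Hugging Face Hub plus the MeCab dependencies (`fugashi`, `ipadic`); the
# checkpoint name appears in the tests above.
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
print(tokenizer.tokenize("こんにちは、世界。"))  # MeCab word split, then WordPiece
print(tokenizer.encode("こんにちは、世界。"))  # adds [CLS] ... [SEP] ids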
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are in the format 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
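# Hedged sanity check for the helpers above (assumes the `is_prime` and
# `solution` definitions just given are in scope): the first six primes are
# 2, 3, 5, 7, 11 and 13, so these asserts are easy to verify by hand.
assert is_prime(97) and not is_prime(1)
assert [solution(n) for n in range(1, 7)] == [2, 3, 5, 7, 11, 13]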
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Reproduces the per-token score computed with the original Mesh
        # TensorFlow (mtf) implementation of mT5-small.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
          and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
          and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
          Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error (MSE) is the average of the square of the difference between the predicted and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
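# Hedged worked example of what the metric above computes, using scikit-learn
# directly so the snippet is self-contained (`datasets.load_metric("mse")`
# wraps exactly this call):
from sklearn.metrics import mean_squared_error

predictions = [2.5, 0.0, 2, 8]
references = [3, -0.5, 2, 7]
print(mean_squared_error(references, predictions))  # 0.375  (MSE)
print(mean_squared_error(references, predictions, squared=False))  # 0.6123...  (RMSE)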
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str=0.0 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "geglu" , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True , _UpperCamelCase : str = "layer_norm" , _UpperCamelCase : bool = False , ) ->str: super().__init__() snake_case_ = only_cross_attention snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' snake_case_ = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ = AdaLayerNorm(_UpperCamelCase , _UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ = AdaLayerNormZero(_UpperCamelCase , _UpperCamelCase ) else: snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase ) snake_case_ = Attention( query_dim=_UpperCamelCase , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_UpperCamelCase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ = ( AdaLayerNorm(_UpperCamelCase , _UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase ) ) snake_case_ = Attention( query_dim=_UpperCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_UpperCamelCase , dim_head=_UpperCamelCase , dropout=_UpperCamelCase , bias=_UpperCamelCase , upcast_attention=_UpperCamelCase , ) # is self-attn if encoder_hidden_states is none else: snake_case_ = None snake_case_ = None # 3. 
Feed-forward snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase ) snake_case_ = FeedForward(_UpperCamelCase , dropout=_UpperCamelCase , activation_fn=_UpperCamelCase , final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ = None snake_case_ = 0 def snake_case__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) ->Any: # Sets chunk feed-forward snake_case_ = chunk_size snake_case_ = dim def snake_case__( self : Optional[Any] , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , _UpperCamelCase : Dict[str, Any] = None , _UpperCamelCase : Optional[torch.LongTensor] = None , ) ->Optional[Any]: # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ = self.norma(_UpperCamelCase , _UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = self.norma( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hidden_dtype=hidden_states.dtype ) else: snake_case_ = self.norma(_UpperCamelCase ) snake_case_ = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ = self.attna( _UpperCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_UpperCamelCase , **_UpperCamelCase , ) if self.use_ada_layer_norm_zero: snake_case_ = gate_msa.unsqueeze(1 ) * attn_output snake_case_ = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ = ( self.norma(_UpperCamelCase , _UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ = self.attna( _UpperCamelCase , encoder_hidden_states=_UpperCamelCase , attention_mask=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = attn_output + hidden_states # 3. Feed-forward snake_case_ = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: snake_case_ = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ = ff_output + hidden_states return hidden_states class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 4 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : str = "geglu" , _UpperCamelCase : bool = False , ) ->List[str]: super().__init__() snake_case_ = int(dim * mult ) snake_case_ = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ = GELU(_UpperCamelCase , _UpperCamelCase , approximate='''tanh''' ) elif activation_fn == "geglu": snake_case_ = GEGLU(_UpperCamelCase , _UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ = ApproximateGELU(_UpperCamelCase , _UpperCamelCase ) snake_case_ = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase , _UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Union[str, Any] ) ->Tuple: for module in self.net: snake_case_ = module(_UpperCamelCase ) return hidden_states class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : str = "none" ) ->int: super().__init__() snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase ) snake_case_ = approximate def snake_case__( self : Tuple , _UpperCamelCase : int ) ->Dict: if gate.device.type != "mps": return F.gelu(_UpperCamelCase , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def snake_case__( self : Any , _UpperCamelCase : List[str] ) ->List[Any]: snake_case_ = self.proj(_UpperCamelCase ) snake_case_ = self.gelu(_UpperCamelCase ) return hidden_states class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : int , _UpperCamelCase : int ) ->Dict: super().__init__() snake_case_ = nn.Linear(_UpperCamelCase , dim_out * 2 ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Optional[int]: if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict ) ->List[str]: snake_case_, snake_case_ = self.proj(_UpperCamelCase ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : int , _UpperCamelCase : int ) ->Union[str, Any]: super().__init__() snake_case_ = nn.Linear(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : str , 
_UpperCamelCase : Optional[int] ) ->int: snake_case_ = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.702 * x ) class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) ->Union[str, Any]: super().__init__() snake_case_ = nn.Embedding(_UpperCamelCase , _UpperCamelCase ) snake_case_ = nn.SiLU() snake_case_ = nn.Linear(_UpperCamelCase , embedding_dim * 2 ) snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str] ) ->Union[str, Any]: snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_, snake_case_ = torch.chunk(_UpperCamelCase , 2 ) snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : int , _UpperCamelCase : int , _UpperCamelCase : Any ) ->str: super().__init__() snake_case_ = CombinedTimestepLabelEmbeddings(_UpperCamelCase , _UpperCamelCase ) snake_case_ = nn.SiLU() snake_case_ = nn.Linear(_UpperCamelCase , 6 * embedding_dim , bias=_UpperCamelCase ) snake_case_ = nn.LayerNorm(_UpperCamelCase , elementwise_affine=_UpperCamelCase , eps=1e-6 ) def snake_case__( self : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any]=None ) ->Optional[Any]: snake_case_ = self.linear(self.silu(self.emb(_UpperCamelCase , _UpperCamelCase , hidden_dtype=_UpperCamelCase ) ) ) snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ = emb.chunk(6 , dim=1 ) snake_case_ = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class snake_case_ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : float = 1e-5 ) ->List[str]: super().__init__() snake_case_ = num_groups snake_case_ = eps if act_fn is None: snake_case_ = None else: snake_case_ = get_activation(_UpperCamelCase ) snake_case_ = nn.Linear(_UpperCamelCase , out_dim * 2 ) def snake_case__( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any ) ->Any: if self.act: snake_case_ = self.act(_UpperCamelCase ) snake_case_ = self.linear(_UpperCamelCase ) snake_case_ = emb[:, :, None, None] snake_case_, snake_case_ = emb.chunk(2 , dim=1 ) snake_case_ = F.group_norm(_UpperCamelCase , self.num_groups , eps=self.eps ) snake_case_ = x * (1 + scale) + shift return x
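# Hedged usage sketch for the module above. The class names were lost in
# extraction; in upstream diffusers they are BasicTransformerBlock, FeedForward,
# GELU, GEGLU, ApproximateGELU, AdaLayerNorm, AdaLayerNormZero and AdaGroupNorm
# (assumed here, not confirmed by the text). Assuming those names, the
# feed-forward block can be exercised on its own:
import torch

from diffusers.models.attention import FeedForward

ff = FeedForward(dim=64, mult=4, activation_fn="geglu")
hidden_states = torch.randn(2, 16, 64)  # (batch, sequence, dim)
out = ff(hidden_states)
assert out.shape == hidden_states.shape  # the block preserves the feature dim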
def permute_recursive(nums):
    """Return all permutations of ``nums`` by popping each element in turn and
    recursing on the rest."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums):
    """Return all permutations of ``nums`` by swapping elements in place and
    backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the backtracking variant
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
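# Hedged cross-check for the two implementations above (the names
# `permute_recursive` / `permute_backtrack` follow the cleanup just applied):
# both must produce the same *set* of permutations as itertools.permutations,
# just in a different order.
from itertools import permutations

expected = set(permutations([1, 2, 3]))
assert {tuple(p) for p in permute_recursive([1, 2, 3])} == expected
assert {tuple(p) for p in permute_backtrack([1, 2, 3])} == expected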
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
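# Hedged usage sketch for the composite config above, via the
# `from_encoder_decoder_configs` classmethod. `BertConfig` is only an
# illustrative choice of encoder/decoder; any PretrainedConfig works.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention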
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
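# Hedged usage sketch of the SentencePiece tokenizer defined above. Assumes
# network access to the Hugging Face Hub; the checkpoint name comes from the
# pretrained-vocab map above.
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Hello world").input_ids  # [CLS] ... [SEP] added automatically
print(tokenizer.convert_ids_to_tokens(ids))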
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
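# Hedged usage sketch: instantiate the config above with its defaults and
# derive the patch-sequence length a ViT-MSN encoder would see
# (224 / 16 = 14 patches per side, i.e. 196 patches before the [CLS] token).
from transformers import ViTMSNConfig

config = ViTMSNConfig()
num_patches = (config.image_size // config.patch_size) ** 2
assert num_patches == 196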
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
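# Hedged usage sketch for the structure above: after three tail inserts the
# list is circular, so the tail's `next` pointer wraps back to the head.
cll = CircularLinkedList()
for value in (1, 2, 3):
    cll.insert_tail(value)
assert str(cll) == "1->2->3"
assert cll.tail.next is cll.head  # circularity: tail points back to head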
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Calculate the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
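# Hedged worked example: for L = 10 mH and C = 100 nF,
# f = 1 / (2 * pi * sqrt(L * C)) = 1 / (2 * pi * sqrt(1e-9)) ≈ 5032.9 Hz.
label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-9)
assert label == "Resonant frequency"
assert abs(freq - 5032.9) < 0.1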
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
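# Hedged usage sketch (assumes the names start_measure / end_measure /
# log_measures restored above and a CUDA-capable torch install; kept as
# comments because running it needs a real workload and a GPU):
#
#   start_measures = start_measure()
#   outputs = model(**batch)          # the workload being profiled
#   measures = end_measure(start_measures)
#   log_measures(measures, "forward pass")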
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    # Sets the given objects to None and empties the relevant device cache;
    # callers should reassign the returned values to the same variables.
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    # Checks whether the exception signals CUDA OOM, an unsupported cuDNN op, or CPU OOM.
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    # Decorator that retries `function` with halved batch sizes whenever it fails
    # with an out-of-memory error; `function` must take `batch_size` as its first argument.
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
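# A minimal usage sketch of `find_executable_batch_size`; the workload below is
# a hypothetical placeholder that fakes an OOM so the halving is visible.
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        # Pretend sizes above 16 run out of memory.
        if batch_size > 16:
            raise RuntimeError("CUDA out of memory.")
        print(f"Training succeeded with batch size {batch_size}")

    train()  # called without batch_size; the decorator supplies 64, then 32, then 16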
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from __future__ import annotations


def encode(plain: str) -> list[int]:
    # Maps each lowercase letter to its alphabet position: a=1 ... z=26.
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Inverse of encode: maps alphabet positions back to lowercase letters.
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
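# Quick sanity checks for the helpers above (no user input needed):
assert encode("abc") == [1, 2, 3]
assert decode([8, 5, 12, 12, 15]) == "hello"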
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
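# Sanity check for res(): it orders powers the same way the true values do,
# e.g. 2**10 = 1024 is larger than 10**2 = 100, and
# res(2, 10) = 10*log10(2) ~ 3.01 > res(10, 2) = 2.
assert res(2, 10) > res(10, 2)
assert res(0, 5) == 0 and res(5, 0) == 1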
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image

from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True

        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    # Infinite prime generator using an incremental sieve.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
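# Why the remainder is 2 * prime * n: expanding (p - 1)^n + (p + 1)^n with the
# binomial theorem and reducing mod p^2 leaves 2 for even n and 2*n*p for odd n,
# which is why the loop above only considers odd n (the `n += 2` step) and
# skips every other prime.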
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        # tokenize the raw text, truncating to the model's maximum length
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
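# --- usage sketch (added) -----------------------------------------------------
# Hedged example of calling the tool above. Instantiating it directly picks up
# default_checkpoint on first use; the long input string is illustrative only.
summarizer = TextSummarizationTool()
summary = summarizer("Sam and Lucy met for lunch and went over the quarterly report in detail...")
print(summary)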
import math


def is_prime(number: int) -> bool:
    """Primality test using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot chains its values in a deque, newest first
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # only defer to the parent's resolution once this slot's chain is full
        # and no slot is empty
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
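# --- usage sketch (added) -----------------------------------------------------
# Illustrative use of the chained hash table above. The constructor and
# insert_data come from the parent HashTable class, so their exact signatures
# are an assumption here.
table = HashTableWithLinkedList(3, charge_factor=2)
for value in (10, 20, 30):
    table.insert_data(value)
print(table.values)  # each occupied slot holds a deque of chained values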
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    # testing a tiny random model is enough to exercise the transform/reverse round trip
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving a model while it is still in BetterTransformer form must fail
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    """Depth-first search for a path from (0, 0) to (size - 1, size - 1)."""
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and blocked points
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # try all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0  # backtrack
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
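# --- usage sketch (added) -----------------------------------------------------
# A small driver for the solver above: 0 marks an open cell, 1 marks a wall.
demo_maze = [
    [0, 1, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 0, 1, 0],
]
solve_maze(demo_maze)  # prints the 0/1 path matrix for the route from (0, 0) to (4, 4)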
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class snake_case_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : Dict ) ->Any: if isinstance(_UpperCamelCase , _UpperCamelCase ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden snake_case_ = deepcopy(_UpperCamelCase ) elif os.path.exists(_UpperCamelCase ): with io.open(_UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: snake_case_ = json.load(_UpperCamelCase ) else: try: snake_case_ = baseaa.urlsafe_baadecode(_UpperCamelCase ).decode('''utf-8''' ) snake_case_ = json.loads(_UpperCamelCase ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' ) snake_case_ = config self.set_stage_and_offload() def snake_case__( self : Optional[int] ) ->Optional[Any]: # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. snake_case_ = self.get_value('''zero_optimization.stage''' , -1 ) # offload snake_case_ = False if self.is_zeroa() or self.is_zeroa(): snake_case_ = set(['''cpu''', '''nvme'''] ) snake_case_ = set( [ self.get_value('''zero_optimization.offload_optimizer.device''' ), self.get_value('''zero_optimization.offload_param.device''' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: snake_case_ = True def snake_case__( self : int , _UpperCamelCase : List[Any] ) ->List[Any]: snake_case_ = self.config # find the config node of interest if it exists snake_case_ = ds_key_long.split('''.''' ) snake_case_ = nodes.pop() for node in nodes: snake_case_ = config.get(_UpperCamelCase ) if config is None: return None, ds_key return config, ds_key def snake_case__( self : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=None ) ->List[Any]: snake_case_, snake_case_ = self.find_config_node(_UpperCamelCase ) if config is None: return default return config.get(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple=False ) ->List[Any]: snake_case_ = self.config # find the config node of interest if it exists snake_case_ = ds_key_long.split('''.''' ) for node in nodes: snake_case_ = config snake_case_ = config.get(_UpperCamelCase ) if config is None: if must_exist: raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' ) else: return # if found remove it if parent_config is not None: parent_config.pop(_UpperCamelCase ) def snake_case__( self : int , _UpperCamelCase : Union[str, Any] ) ->Optional[Any]: snake_case_ = self.get_value(_UpperCamelCase ) return False if value is None else bool(_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : Dict ) ->Optional[Any]: snake_case_ = self.get_value(_UpperCamelCase ) return False if value is None else not bool(_UpperCamelCase ) def snake_case__( self : Optional[int] ) ->Dict: return self._stage == 2 def snake_case__( self : List[str] ) ->List[str]: return self._stage == 3 def snake_case__( self : Any ) ->Any: return self._offload class snake_case_ : '''simple 
docstring''' def __init__( self : Dict , _UpperCamelCase : Tuple ) ->Optional[int]: snake_case_ = engine def snake_case__( self : Tuple , _UpperCamelCase : Tuple , **_UpperCamelCase : Dict ) ->Tuple: # runs backpropagation and handles mixed precision self.engine.backward(_UpperCamelCase , **_UpperCamelCase ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class snake_case_ ( __A ): '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : Any ) ->Optional[Any]: super().__init__(_UpperCamelCase , device_placement=_UpperCamelCase , scaler=_UpperCamelCase ) snake_case_ = hasattr(self.optimizer , '''overflow''' ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[str]=None ) ->List[str]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def snake_case__( self : Tuple ) ->Any: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def snake_case__( self : Dict ) ->List[str]: if self.__has_overflow__: return self.optimizer.overflow return False class snake_case_ ( __A ): '''simple docstring''' def __init__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) ->Dict: super().__init__(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->List[Any]: pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class snake_case_ : '''simple docstring''' def __init__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Any=0.001 , _UpperCamelCase : Optional[int]=0 , **_UpperCamelCase : List[str] ) ->int: snake_case_ = params snake_case_ = lr snake_case_ = weight_decay snake_case_ = kwargs class snake_case_ : '''simple docstring''' def __init__( self : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[Any]=0 , **_UpperCamelCase : Dict ) ->List[Any]: snake_case_ = optimizer snake_case_ = total_num_steps snake_case_ = warmup_num_steps snake_case_ = kwargs
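# --- usage sketch (added) -----------------------------------------------------
# A hedged example of the DeepSpeed config wrapper defined above. The name
# HfDeepSpeedConfig is an assumption about what the obfuscated class corresponds
# to in accelerate; the stage-3 config dict below is illustrative only.
ds_config = {
    "zero_optimization": {
        "stage": 3,
        "offload_param": {"device": "cpu"},
    }
}
hf_ds_config = HfDeepSpeedConfig(ds_config)
print(hf_ds_config.is_zero3())    # True: stage is read from zero_optimization.stage
print(hf_ds_config.is_offload())  # True: a cpu/nvme offload device was configured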
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
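# --- usage sketch (added) -----------------------------------------------------
# Hedged example of reading a Spark DataFrame through the reader above; the
# SparkSession setup is plain pyspark and the cache path is a placeholder.
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
dataset = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()
print(dataset)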
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: sum of the maximum contiguous subarray."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """Configuration class for the ViViT video transformer model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
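# --- usage sketch (added) -----------------------------------------------------
# Instantiating the configuration above. Pairing it with VivitModel is an
# assumption about the companion modeling file, which is not part of this module.
config = VivitConfig(num_frames=16)
print(config.tubelet_size)  # [2, 16, 16] by default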
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
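# --- usage sketch (added) -----------------------------------------------------
# Hedged example, assuming the class above is exposed as LxmertTokenizerFast.
# The checkpoint is the one listed in this module's pretrained maps.
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoding = tokenizer("A man rides a horse.", "What is the man doing?")
print(tokenizer.decode(encoding["input_ids"]))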
def permute_recursive(nums: list[int]) -> list[list[int]]:
    """Return all permutations of the list, built by rotation and recursion."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of the list using in-place backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1)
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data from the backtracking variant
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', } } lowerCAmelCase_ = { '''camembert-base''': 5_12, } lowerCAmelCase_ = '''▁''' class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : str = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Dict="<s>" , _UpperCamelCase : str="</s>" , _UpperCamelCase : Dict="</s>" , _UpperCamelCase : List[Any]="<s>" , _UpperCamelCase : Optional[Any]="<unk>" , _UpperCamelCase : List[str]="<pad>" , _UpperCamelCase : Dict="<mask>" , _UpperCamelCase : List[str]=["<s>NOTUSED", "</s>NOTUSED"] , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: # Mask token behave like a normal word, i.e. include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , cls_token=_UpperCamelCase , pad_token=_UpperCamelCase , mask_token=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) snake_case_ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> snake_case_ = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} snake_case_ = len(self.fairseq_tokens_to_ids ) snake_case_ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def snake_case__( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def snake_case__( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : Any , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + 
token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def snake_case__( self : Any ) ->Union[str, Any]: return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case__( self : Tuple , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : List[str] , _UpperCamelCase : int ) ->Union[str, Any]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(_UpperCamelCase ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(_UpperCamelCase ) def snake_case__( self : List[Any] , _UpperCamelCase : Tuple ) ->Any: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case__( self : Any , _UpperCamelCase : str ) ->Any: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def __getstate__( self : List[Any] ) ->str: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : int , _UpperCamelCase : str ) ->str: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,)
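# --- usage sketch (added) -----------------------------------------------------
# Hedged example, assuming the class above corresponds to CamembertTokenizer.
# The checkpoint is the one listed in this module's pretrained maps.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
ids = tokenizer.encode("J'aime le camembert !")
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))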
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = '''bert-base-cased''' lowerCAmelCase_ = '''google/pegasus-xsum''' lowerCAmelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCAmelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase_ = '''sshleifer/bart-tiny-random''' lowerCAmelCase_ = '''sshleifer/tiny-mbart''' lowerCAmelCase_ = '''sshleifer/tiny-marian-en-de''' def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = '''\n'''.join(SCREAMING_SNAKE_CASE__ ) Path(SCREAMING_SNAKE_CASE__ ).open('''w''' ).writelines(SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.source''' ) , SCREAMING_SNAKE_CASE__ ) _dump_articles(os.path.join(SCREAMING_SNAKE_CASE__ , F'''{split}.target''' ) , SCREAMING_SNAKE_CASE__ ) return tmp_dir class snake_case_ ( __A ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def snake_case__( self : List[str] , _UpperCamelCase : Optional[Any] ) ->int: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES ) snake_case_ = 4 snake_case_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated snake_case_, snake_case_ = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , src_lang=_UpperCamelCase , tgt_lang=_UpperCamelCase , ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_UpperCamelCase , _UpperCamelCase ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place snake_case_ = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def snake_case__( self : int , _UpperCamelCase : Union[str, Any] ) ->Any: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in ARTICLES ) snake_case_ = max(len(tokenizer.encode(_UpperCamelCase ) ) for a in SUMMARIES ) snake_case_ = 4 snake_case_ = LegacySeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=2_0 , max_target_length=_UpperCamelCase , ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def snake_case__( self : List[Any] ) ->List[Any]: snake_case_ = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) snake_case_ = tmp_dir.joinpath('''train.source''' ).open().readlines() snake_case_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_UpperCamelCase , _UpperCamelCase , 1_2_8 , _UpperCamelCase ) snake_case_ = {x.name for x in tmp_dir.iterdir()} snake_case_ = {x.name for x in save_dir.iterdir()} snake_case_ = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_UpperCamelCase ) < len(_UpperCamelCase ) assert len(_UpperCamelCase ) == 1 assert len(packed_examples[0] ) == sum(len(_UpperCamelCase ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def snake_case__( self : Union[str, Any] ) ->Optional[Any]: if not FAIRSEQ_AVAILABLE: return snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=6_4 ) snake_case_ = 6_4 snake_case_ = ds.make_dynamic_sampler(_UpperCamelCase , required_batch_size_multiple=_UpperCamelCase ) snake_case_ = [len(_UpperCamelCase ) for x in batch_sampler] assert len(set(_UpperCamelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_UpperCamelCase ) == len(_UpperCamelCase ) # no dropped or added examples snake_case_ = DataLoader(_UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) snake_case_ = [] snake_case_ = [] for batch in data_loader: snake_case_ = batch['''input_ids'''].shape 
snake_case_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple snake_case_ = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(_UpperCamelCase ) if num_src_tokens > (max_tokens * 1.1): failures.append(_UpperCamelCase ) assert num_src_per_batch[0] == max(_UpperCamelCase ) if failures: raise AssertionError(f'''too many tokens in {len(_UpperCamelCase )} batches''' ) def snake_case__( self : List[str] ) ->int: snake_case_, snake_case_, snake_case_ = self._get_dataset(max_len=5_1_2 ) snake_case_ = 2 snake_case_ = ds.make_sortish_sampler(_UpperCamelCase , shuffle=_UpperCamelCase ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 ) snake_case_ = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_UpperCamelCase ) snake_case_ = tokenizer.pad_token_id def count_pad_tokens(_UpperCamelCase : str , _UpperCamelCase : List[str]="input_ids" ): return [batch[k].eq(_UpperCamelCase ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) ) < sum(count_pad_tokens(_UpperCamelCase , k='''labels''' ) ) assert sum(count_pad_tokens(_UpperCamelCase ) ) < sum(count_pad_tokens(_UpperCamelCase ) ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : Optional[Any]=1_0_0_0 , _UpperCamelCase : List[Any]=1_2_8 ) ->List[Any]: if os.getenv('''USE_REAL_DATA''' , _UpperCamelCase ): snake_case_ = '''examples/seq2seq/wmt_en_ro''' snake_case_ = max_len * 2 * 6_4 if not Path(_UpperCamelCase ).joinpath('''train.len''' ).exists(): save_len_file(_UpperCamelCase , _UpperCamelCase ) else: snake_case_ = '''examples/seq2seq/test_data/wmt_en_ro''' snake_case_ = max_len * 4 save_len_file(_UpperCamelCase , _UpperCamelCase ) snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=_UpperCamelCase , type_path='''train''' , max_source_length=_UpperCamelCase , max_target_length=_UpperCamelCase , n_obs=_UpperCamelCase , ) return ds, max_tokens, tokenizer def snake_case__( self : Union[str, Any] ) ->Optional[int]: snake_case_, snake_case_, snake_case_ = self._get_dataset() snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_UpperCamelCase ) ) snake_case_ = set(DistributedSortishSampler(_UpperCamelCase , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_UpperCamelCase ) ) assert idsa.intersection(_UpperCamelCase ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def snake_case__( self : List[str] , _UpperCamelCase : Tuple ) ->Optional[Any]: snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase , use_fast=_UpperCamelCase ) if tok_name == MBART_TINY: snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) snake_case_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: snake_case_ = SeqaSeqDataset( _UpperCamelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) snake_case_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else 
"add_prefix_space" in kwargs assert len(_UpperCamelCase ) == 1 if tok_name == BART_TINY else len(_UpperCamelCase ) == 0
from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } lowerCAmelCase_ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } lowerCAmelCase_ = { '''facebook/blenderbot_small-90M''': 5_12, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : int = BlenderbotSmallTokenizer def __init__( self : List[Any] , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any="<|endoftext|>" , _UpperCamelCase : List[str]="<|endoftext|>" , _UpperCamelCase : List[Any]="<|endoftext|>" , _UpperCamelCase : List[str]=False , _UpperCamelCase : Optional[Any]=True , **_UpperCamelCase : Optional[int] , ) ->Any: super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase , merges=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase , ) , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = add_prefix_space def snake_case__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=None ) ->Tuple: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def snake_case__( self : Optional[int] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
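# --- usage sketch (added) -----------------------------------------------------
# Hedged example, assuming the class above is exposed as
# BlenderbotSmallTokenizerFast; the checkpoint is from this module's maps.
from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("Sam is a great name.")["input_ids"]
print(tokenizer.decode(ids, skip_special_tokens=True))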
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import datasets from .evaluate import evaluate lowerCAmelCase_ = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' lowerCAmelCase_ = ''' This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' lowerCAmelCase_ = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : str ) ->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )}, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , ) def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : Any ) ->List[str]: snake_case_ = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} snake_case_ = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] snake_case_ = evaluate(dataset=_UpperCamelCase , predictions=_UpperCamelCase ) return score
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    # placeholder that raises a helpful error whenever note_seq is missing
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = Dict[str, Any] lowerCAmelCase_ = List[Prediction] @add_end_docstrings(__A ) class snake_case_ ( __A ): '''simple docstring''' def __init__( self : str , *_UpperCamelCase : Tuple , **_UpperCamelCase : Optional[Any] ) ->int: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def snake_case__( self : Optional[Any] , **_UpperCamelCase : Any ) ->Optional[int]: snake_case_ = {} if "threshold" in kwargs: snake_case_ = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Tuple ) ->Union[Predictions, List[Prediction]]: return super().__call__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : str ) ->Union[str, Any]: snake_case_ = load_image(_UpperCamelCase ) snake_case_ = torch.IntTensor([[image.height, image.width]] ) snake_case_ = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: snake_case_ = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) snake_case_ = target_size return inputs def snake_case__( self : int , _UpperCamelCase : List[str] ) ->List[Any]: snake_case_ = model_inputs.pop('''target_size''' ) snake_case_ = self.model(**_UpperCamelCase ) snake_case_ = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: snake_case_ = model_inputs['''bbox'''] return model_outputs def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str]=0.9 ) ->List[Any]: snake_case_ = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
snake_case_, snake_case_ = target_size[0].tolist() def unnormalize(_UpperCamelCase : int ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1_0_0_0), (height * bbox[1] / 1_0_0_0), (width * bbox[2] / 1_0_0_0), (height * bbox[3] / 1_0_0_0), ] ) ) snake_case_, snake_case_ = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) snake_case_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] snake_case_ = [unnormalize(_UpperCamelCase ) for bbox in model_outputs['''bbox'''].squeeze(0 )] snake_case_ = ['''score''', '''label''', '''box'''] snake_case_ = [dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(scores.tolist() , _UpperCamelCase , _UpperCamelCase ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel snake_case_ = self.image_processor.post_process_object_detection(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) snake_case_ = raw_annotations[0] snake_case_ = raw_annotation['''scores'''] snake_case_ = raw_annotation['''labels'''] snake_case_ = raw_annotation['''boxes'''] snake_case_ = scores.tolist() snake_case_ = [self.model.config.idalabel[label.item()] for label in labels] snake_case_ = [self._get_bounding_box(_UpperCamelCase ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] snake_case_ = ['''score''', '''label''', '''box'''] snake_case_ = [ dict(zip(_UpperCamelCase , _UpperCamelCase ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def snake_case__( self : List[Any] , _UpperCamelCase : "torch.Tensor" ) ->Dict[str, int]: if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) snake_case_, snake_case_, snake_case_, snake_case_ = box.int().tolist() snake_case_ = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
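# A hedged usage sketch for the object-detection pipeline above, via the
# high-level `pipeline` factory; the checkpoint "facebook/detr-resnet-50" is an
# assumption and is not taken from this file.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
outputs = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# Each prediction is a dict with "score", "label" and "box" keys, as assembled
# in the postprocess step above.
for prediction in outputs:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])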
8
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = "vit_msn" def __init__( self : Dict , _UpperCamelCase : Optional[int]=7_6_8 , _UpperCamelCase : Optional[Any]=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : str=3_0_7_2 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : Dict=0.0 , _UpperCamelCase : List[str]=0.02 , _UpperCamelCase : List[Any]=1e-06 , _UpperCamelCase : Any=2_2_4 , _UpperCamelCase : Optional[Any]=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=True , **_UpperCamelCase : Any , ) ->int: super().__init__(**_UpperCamelCase ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias
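# A hedged usage sketch based on the upstream `transformers` counterpart of the
# config above (ViTMSNConfig); the field names mirror the __init__ signature here.
from transformers import ViTMSNConfig

config = ViTMSNConfig(image_size=384, patch_size=16)
print(config.hidden_size, config.image_size, config.patch_size)  # 768 384 16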
8
1
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } lowerCAmelCase_ = { '''b0''': { '''hidden_dim''': 12_80, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 2_24, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 12_80, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 2_40, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 14_08, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 2_60, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 15_36, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 3_00, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 17_92, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 3_80, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 20_48, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 4_56, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 23_04, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 5_28, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 25_60, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 6_00, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = EfficientNetConfig() snake_case_ = CONFIG_MAP[model_name]['''hidden_dim'''] snake_case_ = CONFIG_MAP[model_name]['''width_coef'''] snake_case_ = CONFIG_MAP[model_name]['''depth_coef'''] snake_case_ = CONFIG_MAP[model_name]['''image_size'''] snake_case_ = CONFIG_MAP[model_name]['''dropout_rate'''] snake_case_ = CONFIG_MAP[model_name]['''dw_padding'''] snake_case_ = '''huggingface/label-files''' snake_case_ = '''imagenet-1k-id2label.json''' snake_case_ = 1000 snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) ) snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def __SCREAMING_SNAKE_CASE (): snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = CONFIG_MAP[model_name]['''image_size'''] snake_case_ = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.4785_3944, 0.473_2864, 0.4743_4163] , do_center_crop=SCREAMING_SNAKE_CASE__ , ) return preprocessor def 
__SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] snake_case_ = sorted(set(SCREAMING_SNAKE_CASE__ ) ) snake_case_ = len(SCREAMING_SNAKE_CASE__ ) snake_case_ = {b: str(SCREAMING_SNAKE_CASE__ ) for b, i in zip(SCREAMING_SNAKE_CASE__ , range(SCREAMING_SNAKE_CASE__ ) )} snake_case_ = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: snake_case_ = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', 
'''encoder.top_bn.running_var''') ) snake_case_ = {} for item in rename_keys: if item[0] in original_param_names: snake_case_ = '''efficientnet.''' + item[1] snake_case_ = '''classifier.weight''' snake_case_ = '''classifier.bias''' return key_mapping def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for key, value in tf_params.items(): if "normalization" in key: continue snake_case_ = key_mapping[key] if "_conv" in key and "kernel" in key: snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: snake_case_ = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE__ ) ) else: snake_case_ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = model_classes[model_name]( include_top=SCREAMING_SNAKE_CASE__ , weights='''imagenet''' , input_tensor=SCREAMING_SNAKE_CASE__ , input_shape=SCREAMING_SNAKE_CASE__ , pooling=SCREAMING_SNAKE_CASE__ , classes=1000 , classifier_activation='''softmax''' , ) snake_case_ = original_model.trainable_variables snake_case_ = original_model.non_trainable_variables snake_case_ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: snake_case_ = param.numpy() snake_case_ = list(tf_params.keys() ) # Load HuggingFace model snake_case_ = get_efficientnet_config(SCREAMING_SNAKE_CASE__ ) snake_case_ = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() snake_case_ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) snake_case_ = rename_keys(SCREAMING_SNAKE_CASE__ ) replace_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Initialize preprocessor and preprocess input image snake_case_ = convert_image_processor(SCREAMING_SNAKE_CASE__ ) snake_case_ = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): snake_case_ = hf_model(**SCREAMING_SNAKE_CASE__ ) snake_case_ = outputs.logits.detach().numpy() # Original model inference snake_case_ = False snake_case_ = CONFIG_MAP[model_name]['''image_size'''] snake_case_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) snake_case_ = image.img_to_array(SCREAMING_SNAKE_CASE__ ) snake_case_ = np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=0 ) snake_case_ = original_model.predict(SCREAMING_SNAKE_CASE__ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): os.mkdir(SCREAMING_SNAKE_CASE__ ) # Save converted model and image processor hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) preprocessor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) snake_case_ = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(SCREAMING_SNAKE_CASE__ ) hf_model.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') lowerCAmelCase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
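# A hedged example of running the conversion script above; the file name
# `convert_efficientnet_to_pytorch.py` is an assumption (this corpus drops file
# names), but the flags match the argparse definition directly above.
#
#   python convert_efficientnet_to_pytorch.py \
#       --model_name b0 \
#       --pytorch_dump_folder_path hf_model \
#       --save_model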
8
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    # Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
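# Numeric check for resonant_frequency(): an LC tank with L = 10 mH and
# C = 5 uF resonates near 712 Hz, since f = 1 / (2*pi*sqrt(L*C)).
label, frequency = resonant_frequency(inductance=10e-3, capacitance=5e-6)
print(label, round(frequency, 1))  # Resonant frequency 711.8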
8
1
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
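# Quick sanity check for zeller(): 2015-06-15 was a Monday, and the function
# cross-validates its own arithmetic against datetime before answering.
print(zeller("06-15-2015"))  # Your date 06-15-2015, is a Monday!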
8
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
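# A standalone sketch of the interpreter exercised above, outside unittest;
# it mirrors the add_two pattern used in the tests.
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


state = {"x": 3}
result = evaluate("y = add_two(x)", {"add_two": add_two}, state=state)
print(result, state)  # 5 {'x': 3, 'y': 5}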
8
1
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so colliding entries chain at the same index.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
8
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
8
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
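# A hedged usage sketch for the decorator above, in the style of the
# `accelerate` docs; the training body is a stub. The wrapped function receives
# the current batch size as its first argument and is retried with half the
# batch size whenever an out-of-memory error is raised.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"Trying batch size {batch_size}")
    # ... build dataloaders and run the training loop here ...


train()  # called *without* the batch size; the decorator supplies it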
8
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = ReformerTokenizer SCREAMING_SNAKE_CASE : int = ReformerTokenizerFast SCREAMING_SNAKE_CASE : int = True SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : str = True def snake_case__( self : Dict ) ->List[str]: super().setUp() snake_case_ = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''<s>''' snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase ) def snake_case__( self : Optional[Any] ) ->Dict: snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_UpperCamelCase ) , 1_0_0_0 ) def snake_case__( self : Union[str, Any] ) ->Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def snake_case__( self : List[Any] ) ->str: if not self.test_rust_tokenizer: return snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = '''I was born in 92000, and this is falsé.''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) snake_case_ = rust_tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) snake_case_ = rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(_UpperCamelCase ) snake_case_ = rust_tokenizer.encode(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Optional[int] , _UpperCamelCase : Tuple=1_5 ) ->Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) # Simple input snake_case_ = '''This is a simple input''' snake_case_ = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case_ = ('''This is a simple input''', '''This is a pair''') snake_case_ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' ) # Simple input self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' ) # Simple input self.assertRaises( _UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' 
, ) # Pair input self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' ) # Pair input self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' ) # Pair input self.assertRaises( _UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding='''max_length''' , ) def snake_case__( self : Dict ) ->int: pass def snake_case__( self : Dict ) ->str: snake_case_ = ReformerTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase ) snake_case_ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def snake_case__( self : Optional[int] ) ->Tuple: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''Hello World!''' snake_case_ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) ) @slow def snake_case__( self : str ) ->str: snake_case_ = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) snake_case_ = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) ) @require_torch @slow def snake_case__( self : List[str] ) ->List[str]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence snake_case_ = list(self.big_tokenizer.get_vocab().keys() )[:1_0] snake_case_ = ''' '''.join(_UpperCamelCase ) snake_case_ = self.big_tokenizer.encode_plus(_UpperCamelCase , return_tensors='''pt''' ) snake_case_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) snake_case_ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) snake_case_ = encoded_sequence['''input_ids'''].shape snake_case_ = ReformerModel(_UpperCamelCase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_UpperCamelCase ) model(**_UpperCamelCase ) @slow def snake_case__( self : List[Any] ) ->Dict: # fmt: off snake_case_ = {'''input_ids''': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 snake_case_ = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_UpperCamelCase , sequences=_UpperCamelCase , )
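# A small end-to-end sketch with the public checkpoint used in the slow tests
# above; requires `sentencepiece` and access to the Hugging Face Hub.
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tokenizer.encode("Hello World!")
print(ids)  # [126, 32, 262, 152, 38, 72, 287], matching the expected ids above
print(tokenizer.decode(ids))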
8
from __future__ import annotations


def encode(plain: str) -> list[int]:
    # A1Z26 cipher: 'a' -> 1, 'b' -> 2, ..., 'z' -> 26
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
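# Quick check of the A1Z26 helpers above: letters map to 1-26 and back.
print(encode("hello"))             # [8, 5, 12, 12, 15]
print(decode([8, 5, 12, 12, 15]))  # hello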
8
1
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    # The method names below follow the Pipeline contract
    # (_sanitize_parameters / preprocess / _forward / postprocess).
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
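# A hedged sketch of registering and using the custom pipeline above; the task
# name "pair-classification" and the MRPC checkpoint follow the usual
# "adding a new pipeline" pattern and are assumptions, not taken from this file.
from transformers import AutoModelForSequenceClassification, pipeline
from transformers.pipelines import PIPELINE_REGISTRY

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
print(classifier("I love you", second_text="I like you"))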
8
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
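# Example: compare 2**10 with 3**7 without materializing the powers; res()
# returns y * log10(x), so the larger value marks the larger power.
print(res(2, 10), res(3, 7))  # ~3.01 vs ~3.34, so 3**7 is larger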
8
1
from ..utils import DummyObject, requires_backends class snake_case_ ( metaclass=__A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = ["onnx"] def __init__( self : Optional[Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : Union[str, Any] ) ->Union[str, Any]: requires_backends(self , ['''onnx'''] ) @classmethod def snake_case__( cls : Optional[int] , *_UpperCamelCase : str , **_UpperCamelCase : Optional[Any] ) ->Any: requires_backends(cls , ['''onnx'''] ) @classmethod def snake_case__( cls : int , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : int ) ->Optional[Any]: requires_backends(cls , ['''onnx'''] )
8
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
8
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = { '''vocab_file''': { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''', }, '''tokenizer_file''': { '''unc-nlp/lxmert-base-uncased''': ( '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': 5_12, } lowerCAmelCase_ = { '''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True}, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : Any = LxmertTokenizer def __init__( self : Union[str, Any] , _UpperCamelCase : int=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Dict=True , _UpperCamelCase : Any="[UNK]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : List[Any]="[PAD]" , _UpperCamelCase : Union[str, Any]="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : List[str]=True , _UpperCamelCase : List[str]=None , **_UpperCamelCase : List[str] , ) ->Any: super().__init__( _UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCamelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCamelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCamelCase ) != tokenize_chinese_chars ): snake_case_ = getattr(_UpperCamelCase , normalizer_state.pop('''type''' ) ) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**_UpperCamelCase ) snake_case_ = do_lower_case def snake_case__( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None ) ->List[Any]: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: snake_case_ = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase ) return tuple(_UpperCamelCase )
8
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    # Incremental sieve of Eratosthenes: each known composite maps to one of
    # its prime factors; numbers with no entry are prime.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
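# Sanity check for the incremental sieve above: take the first ten primes.
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]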
8
1
def check_bouncy(n: int) -> bool:
    # A number is bouncy if its digits are neither monotonically increasing
    # nor monotonically decreasing.
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
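# Quick checks from the Project Euler 112 statement: 134468 has only
# increasing digits (not bouncy) while 155349 is bouncy; the 50% proportion
# is first reached at 538.
print(check_bouncy(134468), check_bouncy(155349))  # False True
print(solution(50))  # 538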
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
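
# The _LazyModule indirection above keeps `import transformers.models.opt`
# cheap: the torch/TF/Flax submodules are imported only when one of the
# declared attributes is first accessed. A minimal sketch of the effect:
import transformers.models.opt as opt

config = opt.OPTConfig()  # first attribute access resolves the real submodule
print(config.model_type)  # "opt"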
import pytest lowerCAmelCase_ = '''__dummy_dataset1__''' lowerCAmelCase_ = ''' import json import os import datasets REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) ''' @pytest.fixture def __SCREAMING_SNAKE_CASE (): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def __SCREAMING_SNAKE_CASE (): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = dataset_loading_script_name snake_case_ = tmp_path / '''datasets''' / script_name script_dir.mkdir(parents=SCREAMING_SNAKE_CASE__ ) snake_case_ = script_dir / F'''{script_name}.py''' with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) return str(SCREAMING_SNAKE_CASE__ )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
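
# Hedged usage sketch: instantiating the tool pulls the samsum BART checkpoint
# from the Hub on first call, so this requires network access.
tool = TextSummarizationTool()
summary = tool(
    "Hugging Face was founded in 2016. The company maintains widely used "
    "open-source machine-learning libraries and a model hub."
)
print(summary)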
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for the Semantic Stable Diffusion pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot chains its values in a deque instead of storing one value.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Only fall back to the base class's resolution once this slot's chain
        # is full and every slot in the table is occupied.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
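
# Usage sketch, assuming the HashTable base class from the sibling module
# exposes `insert_data` and accepts (size_table, charge_factor) as in the
# same repository.
table = HashTableWithLinkedList(3, charge_factor=2)
for value in (17, 18, 99, 26, 31):
    table.insert_data(value)  # colliding values chain within a slot's deque
print(table.values)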
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class snake_case_ : '''simple docstring''' def __init__( self : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str=1_3 , _UpperCamelCase : str=3_0 , _UpperCamelCase : List[str]=2 , _UpperCamelCase : int=3 , _UpperCamelCase : List[str]=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : int=3_2 , _UpperCamelCase : Dict=2 , _UpperCamelCase : Any=4 , _UpperCamelCase : Optional[Any]=3_7 , _UpperCamelCase : int="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Tuple=1_0 , _UpperCamelCase : Union[str, Any]=0.02 , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : List[str]=None , _UpperCamelCase : Dict=2 , ) ->Tuple: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = scope snake_case_ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) snake_case_ = (image_size // patch_size) ** 2 snake_case_ = num_patches + 2 def snake_case__( self : Tuple ) ->Dict: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = self.get_config() return config, pixel_values, labels def snake_case__( self : List[Any] ) ->Any: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case__( self : Dict , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] ) ->Dict: snake_case_ = TFDeiTModel(config=_UpperCamelCase ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , 
_UpperCamelCase : Tuple ) ->List[Any]: snake_case_ = TFDeiTForMaskedImageModeling(config=_UpperCamelCase ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ = 1 snake_case_ = TFDeiTForMaskedImageModeling(_UpperCamelCase ) snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) ->Tuple: snake_case_ = self.type_sequence_label_size snake_case_ = TFDeiTForImageClassification(_UpperCamelCase ) snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ = 1 snake_case_ = TFDeiTForImageClassification(_UpperCamelCase ) snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ = model(_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case__( self : int ) ->Any: snake_case_ = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case_ ( __A , __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE : Union[str, Any] = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE : int = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : List[Any] = False SCREAMING_SNAKE_CASE : List[Any] = False def snake_case__( self : List[Any] ) ->Tuple: snake_case_ = TFDeiTModelTester(self ) snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=3_7 ) def snake_case__( self : List[str] ) ->Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def snake_case__( self : Tuple ) ->int: pass def snake_case__( self : Optional[int] ) ->str: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase , tf.keras.layers.Dense ) ) def snake_case__( self : Dict ) ->str: snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(_UpperCamelCase ) snake_case_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) def snake_case__( self : Tuple ) 
->str: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase ) def snake_case__( self : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : List[str]=False ) ->Tuple: snake_case_ = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def snake_case__( self : str ) ->Tuple: for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = TFDeiTModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def __SCREAMING_SNAKE_CASE (): snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case_ ( unittest.TestCase ): '''simple docstring''' @cached_property def snake_case__( self : List[str] ) ->Any: return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def snake_case__( self : Optional[Any] ) ->int: snake_case_ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=_UpperCamelCase , return_tensors='''tf''' ) # forward pass snake_case_ = model(**_UpperCamelCase ) # verify the logits snake_case_ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _UpperCamelCase ) snake_case_ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # Check for already visited and blocked points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark as visited.
            solutions[i][j] = 1

            # Check all four directions.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
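
# Example run for the solver above: 0 marks an open cell, 1 a wall. The path
# matrix is printed row by row when a route from (0, 0) to (n-1, n-1) exists.
maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
assert solve_maze(maze) is True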
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() snake_case_ = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) ) snake_case_ = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } snake_case_ = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6_0_0_0, '''return_attention_mask''': False, '''do_normalize''': True, } snake_case_ = tempfile.mkdtemp() snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ = os.path.join(self.tmpdirname , _UpperCamelCase ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCamelCase ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_UpperCamelCase ) + '''\n''' ) # load decoder from hub snake_case_ = '''hf-internal-testing/ngram-beam-search-decoder''' def snake_case__( self : Union[str, Any] , **_UpperCamelCase : Optional[int] ) ->List[Any]: snake_case_ = self.add_kwargs_tokens_map.copy() kwargs.update(_UpperCamelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def snake_case__( self : Optional[int] , **_UpperCamelCase : List[str] ) ->Optional[Any]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def snake_case__( self : str , **_UpperCamelCase : List[str] ) ->Optional[int]: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_UpperCamelCase ) def snake_case__( self : Any ) ->Optional[int]: shutil.rmtree(self.tmpdirname ) def snake_case__( self : Dict ) ->Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_feature_extractor() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) snake_case_ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _UpperCamelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , 
_UpperCamelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _UpperCamelCase ) def snake_case__( self : str ) ->Tuple: snake_case_ = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match snake_case_ = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def snake_case__( self : List[str] ) ->Any: snake_case_ = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(_UpperCamelCase , '''include''' ): WavaVecaProcessorWithLM( tokenizer=_UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def snake_case__( self : str ) ->str: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = floats_list((3, 1_0_0_0) ) snake_case_ = feature_extractor(_UpperCamelCase , return_tensors='''np''' ) snake_case_ = processor(_UpperCamelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def snake_case__( self : Union[str, Any] ) ->Optional[int]: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = '''This is a test string''' snake_case_ = processor(text=_UpperCamelCase ) snake_case_ = tokenizer(_UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case__( self : str , _UpperCamelCase : Optional[Any]=(2, 1_0, 1_6) , _UpperCamelCase : Optional[int]=7_7 ) ->Any: np.random.seed(_UpperCamelCase ) return np.random.rand(*_UpperCamelCase ) def snake_case__( self : int ) ->Any: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 ) snake_case_ = processor.decode(_UpperCamelCase ) snake_case_ = decoder.decode_beams(_UpperCamelCase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Any: snake_case_ = 
self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: snake_case_ = processor.batch_decode(_UpperCamelCase ) else: with get_context(_UpperCamelCase ).Pool() as pool: snake_case_ = processor.batch_decode(_UpperCamelCase , _UpperCamelCase ) snake_case_ = list(_UpperCamelCase ) with get_context('''fork''' ).Pool() as p: snake_case_ = decoder.decode_beams_batch(_UpperCamelCase , _UpperCamelCase ) snake_case_, snake_case_, snake_case_ = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_UpperCamelCase , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(_UpperCamelCase , decoded_processor.logit_score ) self.assertListEqual(_UpperCamelCase , decoded_processor.lm_score ) def snake_case__( self : Tuple ) ->Union[str, Any]: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = self._get_dummy_logits() snake_case_ = 1_5 snake_case_ = -20.0 snake_case_ = -4.0 snake_case_ = processor.batch_decode( _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , ) snake_case_ = decoded_processor_out.text snake_case_ = list(_UpperCamelCase ) with get_context('''fork''' ).Pool() as pool: snake_case_ = decoder.decode_beams_batch( _UpperCamelCase , _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , ) snake_case_ = [d[0][0] for d in decoded_decoder_out] snake_case_ = [d[0][2] for d in decoded_decoder_out] snake_case_ = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _UpperCamelCase ) self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCamelCase , atol=1e-3 ) ) self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _UpperCamelCase , atol=1e-3 ) ) def snake_case__( self : Tuple ) ->Tuple: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) snake_case_ = self._get_dummy_logits() snake_case_ = 2.0 snake_case_ = 5.0 snake_case_ = -20.0 snake_case_ = True snake_case_ = processor.batch_decode( _UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , ) snake_case_ = decoded_processor_out.text snake_case_ = list(_UpperCamelCase ) decoder.reset_params( alpha=_UpperCamelCase , beta=_UpperCamelCase , 
unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , ) with get_context('''fork''' ).Pool() as pool: snake_case_ = decoder.decode_beams_batch( _UpperCamelCase , _UpperCamelCase , ) snake_case_ = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _UpperCamelCase ) snake_case_ = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _UpperCamelCase ) def snake_case__( self : Dict ) ->str: snake_case_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case_ = processor.decoder.model_container[processor.decoder._model_key] snake_case_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() snake_case_ = os.listdir(_UpperCamelCase ) snake_case_ = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : List[Any] ) ->Optional[Any]: snake_case_ = snapshot_download('''hf-internal-testing/processor_with_lm''' ) snake_case_ = WavaVecaProcessorWithLM.from_pretrained(_UpperCamelCase ) snake_case_ = processor.decoder.model_container[processor.decoder._model_key] snake_case_ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() snake_case_ = os.listdir(_UpperCamelCase ) snake_case_ = os.listdir(_UpperCamelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : Tuple ) ->Optional[Any]: snake_case_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case_ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case_ = floats_list((3, 1_0_0_0) ) snake_case_ = processor_wavaveca(_UpperCamelCase , return_tensors='''np''' ) snake_case_ = processor_auto(_UpperCamelCase , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) snake_case_ = self._get_dummy_logits() snake_case_ = processor_wavaveca.batch_decode(_UpperCamelCase ) snake_case_ = processor_auto.batch_decode(_UpperCamelCase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = self.get_feature_extractor() snake_case_ = self.get_tokenizer() snake_case_ = self.get_decoder() snake_case_ = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def snake_case__( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ) ->str: snake_case_ = [d[key] for d in offsets] return retrieved_list def snake_case__( self : str ) ->Union[str, Any]: snake_case_ = 
WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case_ = self._get_dummy_logits()[0] snake_case_ = processor.decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def snake_case__( self : Dict ) ->int: snake_case_ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) snake_case_ = self._get_dummy_logits() snake_case_ = processor.batch_decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(_UpperCamelCase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def snake_case__( self : int ) ->Any: import torch snake_case_ = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_UpperCamelCase ) snake_case_ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6_0_0_0 ) ) snake_case_ = iter(_UpperCamelCase ) snake_case_ = next(_UpperCamelCase ) snake_case_ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) snake_case_ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train snake_case_ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): snake_case_ = model(_UpperCamelCase ).logits.cpu().numpy() snake_case_ = processor.decode(logits[0] , output_word_offsets=_UpperCamelCase ) snake_case_ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate snake_case_ = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] snake_case_ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(_UpperCamelCase , '''word''' ) ) , _UpperCamelCase ) self.assertEqual(''' '''.join(self.get_from_offsets(_UpperCamelCase , '''word''' ) ) , output.text ) # output times snake_case_ = 
torch.tensor(self.get_from_offsets(_UpperCamelCase , '''start_time''' ) ) snake_case_ = torch.tensor(self.get_from_offsets(_UpperCamelCase , '''end_time''' ) ) # fmt: off snake_case_ = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) snake_case_ = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) ) self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Chudnovsky algorithm: compute pi to `precision` digits."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last digit as a guard against rounding error.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
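
# Self-check for pi() above: since the last (guard) digit is dropped, a
# 10-digit request returns the first nine significant digits.
assert pi(10) == "3.14159265"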
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
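
# Hedged usage sketch, assuming a local Spark session is available. With
# streaming=False, read() materializes the DataFrame via download_and_prepare().
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], ["text"])
ds = SparkDatasetReader(df, cache_dir="/tmp/spark_cache", streaming=False).read()
print(ds)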
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable lowerCAmelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''DPTFeatureExtractor'''] lowerCAmelCase_ = ['''DPTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DPTForDepthEstimation''', '''DPTForSemanticSegmentation''', '''DPTModel''', '''DPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case_ ( __A , __A , __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE : List[str] = frozenset([] ) def snake_case__( self : int ) ->Optional[Any]: torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_UpperCamelCase , ) snake_case_ = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) snake_case_ = CLIPTextModel(_UpperCamelCase ) snake_case_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) snake_case_ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def snake_case__( self : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]=0 ) ->List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case_ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('''RGB''' ).resize((6_4, 6_4) ) snake_case_ = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith('''mps''' ): snake_case_ = torch.manual_seed(_UpperCamelCase ) else: snake_case_ = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, 
'''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def snake_case__( self : Union[str, Any] ) ->Optional[Any]: snake_case_ = '''cpu''' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ = sd_pipe(**_UpperCamelCase ).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case__( self : str ) ->int: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__( self : List[str] ) ->Optional[Any]: snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) snake_case_ = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase , safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , output_type='''np''' , ) snake_case_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def snake_case__( self : str ) ->Optional[int]: snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) snake_case_ = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase , torch_dtype=torch.floataa , safety_checker=_UpperCamelCase , ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , output_type='''np''' , ) snake_case_ = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def 
snake_case__( self : Union[str, Any] ) ->int: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) snake_case_ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) snake_case_ = '''stabilityai/stable-diffusion-2-inpainting''' snake_case_ = PNDMScheduler.from_pretrained(_UpperCamelCase , subfolder='''scheduler''' ) snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase , safety_checker=_UpperCamelCase , scheduler=_UpperCamelCase , torch_dtype=torch.floataa , ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench''' snake_case_ = torch.manual_seed(0 ) snake_case_ = pipe( prompt=_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type='''np''' , ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : torch.FloatTensor class snake_case_ ( __A , __A ): '''simple docstring''' @register_to_config def __init__( self : Any , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3 , _UpperCamelCase : Tuple[str] = ("DownEncoderBlock2D",) , _UpperCamelCase : Tuple[str] = ("UpDecoderBlock2D",) , _UpperCamelCase : Tuple[int] = (6_4,) , _UpperCamelCase : int = 1 , _UpperCamelCase : str = "silu" , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2_5_6 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : float = 0.18215 , _UpperCamelCase : str = "group" , ) ->int: super().__init__() # pass init params to Encoder snake_case_ = Encoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , ) snake_case_ = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case_ = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) snake_case_ = VectorQuantizer(_UpperCamelCase , _UpperCamelCase , beta=0.25 , remap=_UpperCamelCase , sane_index_shape=_UpperCamelCase ) snake_case_ = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 ) # pass init params to Decoder snake_case_ = Decoder( in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , norm_type=_UpperCamelCase , ) @apply_forward_hook def snake_case__( self : Any , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = True ) ->VQEncoderOutput: snake_case_ = self.encoder(_UpperCamelCase ) snake_case_ = self.quant_conv(_UpperCamelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=_UpperCamelCase ) @apply_forward_hook def snake_case__( self : str , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: snake_case_, snake_case_, snake_case_ = self.quantize(_UpperCamelCase ) else: snake_case_ = h snake_case_ = self.post_quant_conv(_UpperCamelCase ) snake_case_ = self.decoder(_UpperCamelCase , quant if self.config.norm_type == '''spatial''' else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]: snake_case_ = sample snake_case_ = self.encode(_UpperCamelCase ).latents snake_case_ = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
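
# The VQ-VAE class above was anonymized by this dump; in the upstream diffusers
# library it corresponds to VQModel. A hedged round-trip sketch under that
# assumption, using the default constructor arguments from the signature above:
import torch

model = VQModel()
sample = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = model.encode(sample).latents          # encoder + quant_conv
    reconstruction = model.decode(latents).sample   # quantize + decoder
print(latents.shape, reconstruction.shape)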
import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
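# Quick sanity checks for the helpers above (pure Python, no extra dependencies).
# assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
# assert solution(6) == 13  # the 6th prime
# assert solution(1) == 2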
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa lowerCAmelCase_ = logging.getLogger(__name__) class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "summarization" SCREAMING_SNAKE_CASE : Union[str, Any] = ["loss"] SCREAMING_SNAKE_CASE : List[Any] = ROUGE_KEYS SCREAMING_SNAKE_CASE : List[Any] = "rouge2" def __init__( self : int , _UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->List[Any]: if hparams.sortish_sampler and hparams.gpus > 1: snake_case_ = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(_UpperCamelCase , num_labels=_UpperCamelCase , mode=self.mode , **_UpperCamelCase ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) snake_case_ = Path(self.output_dir ) / '''metrics.json''' snake_case_ = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) snake_case_ = 0 snake_case_ = defaultdict(_UpperCamelCase ) snake_case_ = self.config.model_type snake_case_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size snake_case_ = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } snake_case_ = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } snake_case_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} snake_case_ = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) snake_case_ = get_git_info()['''repo_sha'''] snake_case_ = hparams.num_workers snake_case_ = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCamelCase ): snake_case_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang] snake_case_ = self.decoder_start_token_id snake_case_ = ( 
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) snake_case_ = False snake_case_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: snake_case_ = self.hparams.eval_max_gen_length else: snake_case_ = self.model.config.max_length snake_case_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def snake_case__( self : List[Any] , _UpperCamelCase : Dict[str, torch.Tensor] ) ->Dict[str, List[str]]: snake_case_ = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(_UpperCamelCase , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) snake_case_ = True return readable_batch def snake_case__( self : Tuple , _UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[str] ) ->List[Any]: return self.model(_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : int , _UpperCamelCase : List[int] ) ->List[Any]: snake_case_ = self.tokenizer.batch_decode( _UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return lmap(str.strip , _UpperCamelCase ) def snake_case__( self : List[Any] , _UpperCamelCase : dict ) ->Tuple: snake_case_ = self.tokenizer.pad_token_id snake_case_, snake_case_ = batch['''input_ids'''], batch['''attention_mask'''] snake_case_ = batch['''labels'''] if isinstance(self.model , _UpperCamelCase ): snake_case_ = self.model._shift_right(_UpperCamelCase ) else: snake_case_ = shift_tokens_right(_UpperCamelCase , _UpperCamelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero snake_case_ = decoder_input_ids self.save_readable_batch(_UpperCamelCase ) snake_case_ = self(_UpperCamelCase , attention_mask=_UpperCamelCase , decoder_input_ids=_UpperCamelCase , use_cache=_UpperCamelCase ) snake_case_ = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id snake_case_ = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase ) assert lm_logits.shape[-1] == self.vocab_size snake_case_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: snake_case_ = nn.functional.log_softmax(_UpperCamelCase , dim=-1 ) snake_case_, snake_case_ = label_smoothed_nll_loss( _UpperCamelCase , _UpperCamelCase , self.hparams.label_smoothing , ignore_index=_UpperCamelCase ) return (loss,) @property def snake_case__( self : Tuple ) ->int: return self.tokenizer.pad_token_id def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : str ) ->Dict: snake_case_ = self._step(_UpperCamelCase ) snake_case_ = dict(zip(self.loss_names , _UpperCamelCase ) ) # tokens per batch snake_case_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() snake_case_ = batch['''input_ids'''].shape[0] snake_case_ = batch['''input_ids'''].eq(self.pad ).sum() snake_case_ = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) ->Dict: return self._generative_step(_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : 
Optional[Any]="val" ) ->Dict: self.step_count += 1 snake_case_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} snake_case_ = losses['''loss'''] snake_case_ = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } snake_case_ = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) snake_case_ = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(_UpperCamelCase ) snake_case_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()} snake_case_ = self.step_count self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path snake_case_ = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'''{prefix}_loss''': loss, f'''{prefix}_{self.val_metric}''': metric_tensor, } def snake_case__( self : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) ->Dict: return calculate_rouge(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : dict ) ->dict: snake_case_ = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') snake_case_ = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) snake_case_ = (time.time() - ta) / batch['''input_ids'''].shape[0] snake_case_ = self.ids_to_clean_text(_UpperCamelCase ) snake_case_ = self.ids_to_clean_text(batch['''labels'''] ) snake_case_ = self._step(_UpperCamelCase ) snake_case_ = dict(zip(self.loss_names , _UpperCamelCase ) ) snake_case_ = self.calc_generative_metrics(_UpperCamelCase , _UpperCamelCase ) snake_case_ = np.mean(lmap(_UpperCamelCase , _UpperCamelCase ) ) base_metrics.update(gen_time=_UpperCamelCase , gen_len=_UpperCamelCase , preds=_UpperCamelCase , target=_UpperCamelCase , **_UpperCamelCase ) return base_metrics def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Dict ) ->Optional[int]: return self._generative_step(_UpperCamelCase ) def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[int] ) ->List[Any]: return self.validation_epoch_end(_UpperCamelCase , prefix='''test''' ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : Any ) ->SeqaSeqDataset: snake_case_ = self.n_obs[type_path] snake_case_ = self.target_lens[type_path] snake_case_ = self.dataset_class( self.tokenizer , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , max_target_length=_UpperCamelCase , **self.dataset_kwargs , ) return dataset def snake_case__( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False ) ->DataLoader: snake_case_ = self.get_dataset(_UpperCamelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": snake_case_ = dataset.make_sortish_sampler(_UpperCamelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": snake_case_ = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , 
distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , ) def snake_case__( self : List[str] ) ->DataLoader: snake_case_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCamelCase ) return dataloader def snake_case__( self : Union[str, Any] ) ->DataLoader: return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def snake_case__( self : Tuple ) ->DataLoader: return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def snake_case__( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) ->Tuple: BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase ) add_generic_args(_UpperCamelCase , _UpperCamelCase ) parser.add_argument( '''--max_source_length''' , default=1_0_2_4 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=5_6 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=1_4_2 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=1_4_2 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_UpperCamelCase ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_UpperCamelCase ) parser.add_argument('''--max_tokens_per_batch''' , type=_UpperCamelCase , default=_UpperCamelCase ) parser.add_argument('''--logger_name''' , type=_UpperCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=_UpperCamelCase , default=5_0_0 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' , type=_UpperCamelCase , default='''summarization''' , required=_UpperCamelCase , help='''# examples. 
-1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=_UpperCamelCase , default=0.0 , required=_UpperCamelCase ) parser.add_argument('''--src_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase ) parser.add_argument('''--tgt_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase ) parser.add_argument('''--eval_beams''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase ) parser.add_argument( '''--val_metric''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "translation" SCREAMING_SNAKE_CASE : Dict = ["loss"] SCREAMING_SNAKE_CASE : str = ["bleu"] SCREAMING_SNAKE_CASE : List[str] = "bleu" def __init__( self : Optional[Any] , _UpperCamelCase : Any , **_UpperCamelCase : Any ) ->Optional[int]: super().__init__(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = hparams.src_lang snake_case_ = hparams.tgt_lang def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) ->dict: return calculate_bleu(_UpperCamelCase , _UpperCamelCase ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) check_output_dir(SCREAMING_SNAKE_CASE__ , expected_items=3 ) if model is None: if "summarization" in args.task: snake_case_ = SummarizationModule(SCREAMING_SNAKE_CASE__ ) else: snake_case_ = TranslationModule(SCREAMING_SNAKE_CASE__ ) snake_case_ = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): snake_case_ = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger snake_case_ = os.environ.get('''WANDB_PROJECT''' , SCREAMING_SNAKE_CASE__ ) snake_case_ = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger snake_case_ = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: snake_case_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: snake_case_ = False snake_case_ = args.val_metric == '''loss''' snake_case_ = generic_train( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE__ ) , early_stopping_callback=SCREAMING_SNAKE_CASE__ , logger=SCREAMING_SNAKE_CASE__ , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model snake_case_ = '''''' 
snake_case_ = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=SCREAMING_SNAKE_CASE__ ) ) if checkpoints: snake_case_ = checkpoints[-1] snake_case_ = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() lowerCAmelCase_ = pl.Trainer.add_argparse_args(parser) lowerCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) lowerCAmelCase_ = parser.parse_args() main(args)
from sklearn.metrics import mean_squared_error import datasets lowerCAmelCase_ = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' lowerCAmelCase_ = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' lowerCAmelCase_ = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def snake_case__( self : List[Any] ) ->Optional[int]: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def snake_case__( self : int , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any=None , _UpperCamelCase : Optional[int]="uniform_average" , _UpperCamelCase : Tuple=True ) ->Tuple: snake_case_ = mean_squared_error( _UpperCamelCase , _UpperCamelCase , sample_weight=_UpperCamelCase , multioutput=_UpperCamelCase , squared=_UpperCamelCase ) return {"mse": mse}
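# Cross-check sketch for the metric above: the sklearn-backed result should
# match a direct NumPy computation of the same formula. Only NumPy is needed.
#
# import numpy as np
#
# predictions = np.array([2.5, 0.0, 2, 8])
# references = np.array([3, -0.5, 2, 7])
# mse = float(np.mean((predictions - references) ** 2))
# rmse = float(np.sqrt(mse))
# print(mse)   # 0.375
# print(rmse)  # 0.6123724356957945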
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h_n = n * (2n - 1)."""
    # Check the type first so non-integers raise a clean error instead of a
    # TypeError from the comparison.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
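# The closed form h_n = n(2n - 1) satisfies the recurrence
# h_n = h_{n-1} + 4(n - 1) + 1; a quick consistency check:
#
# expected = [0]
# for n in range(1, 10):
#     expected.append(expected[-1] + 4 * (n - 1) + 1)
# assert hexagonal_numbers(10) == expected
# print(hexagonal_numbers(5))  # [0, 1, 6, 15, 28]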
def permute_recursive(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` by rotating the list and recursing."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute_recursive(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute_backtrack(nums: list[int]) -> list[list[int]]:
    """Return all permutations of `nums` using in-place swaps and backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the backtracking variant
    res = permute_backtrack([1, 2, 3])
    print(res)
    doctest.testmod()
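# Both implementations should produce the same set of permutations; a sketch
# of a consistency check (ordering differs, so compare sorted results):
#
# a = sorted(permute_recursive([1, 2, 3]))
# b = sorted(permute_backtrack([1, 2, 3]))
# assert a == b and len(a) == 6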
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    # `iter_` rather than `iter` to avoid shadowing the builtin
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Use the narrowest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
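# Sketch of reading the dump back. The path below mirrors the naming scheme
# above with the default arguments; both the path and tokenizer name are
# illustrative assumptions.
#
# import pickle
#
# with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#     sequences = pickle.load(handle)
# print(len(sequences), "sequences")
# print(sequences[0][:10])  # first ten token ids of the first line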
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from ..utils import DummyObject, requires_backends


# Placeholder raised-at-use object for when the `note_seq` backend is missing.
# The class name follows the diffusers dummy-object convention; `MidiProcessor`
# is the note_seq-backed processor this stub stands in for.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; the tail of the longer one is appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
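# A few illustrative calls showing the leftover-tail behavior:
# assert alternative_string_arrange("ABCD", "XY") == "AXBYCD"
# assert alternative_string_arrange("XY", "ABCD") == "XAYBCD"
# assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"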
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
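# Minimal sketch: instantiating the config above and deriving the patch-grid
# size the vision encoder will see (a standard ViT calculation, not an API of
# this file):
#
# config = ViTMSNConfig(image_size=224, patch_size=16)
# num_patches = (config.image_size // config.patch_size) ** 2
# print(num_patches)  # 196 patches, plus one [CLS] token in the encoder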
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Return the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
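# Worked example: L = 10 mH and C = 100 nF give
# f = 1 / (2 * pi * sqrt(1e-9)) ≈ 5032.9 Hz.
#
# label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-9)
# print(label, f"{freq:.1f} Hz")  # Resonant frequency 5032.9 Hz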
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (taxicab) distance between two n-dimensional points."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same computation as `manhattan_distance`, expressed as a single sum."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
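# Illustrative calls, including the dimension check above:
# assert manhattan_distance([1, 1], [2, 2]) == 2.0
# assert manhattan_distance_one_liner([1.5, 2], [3, 4]) == 3.5
# manhattan_distance([1, 1], [2, 2, 2])  # raises ValueError: Both points must be ...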
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return x + 2 class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) snake_case_ = '''x = y''' snake_case_ = {'''y''': 5} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 5, '''y''': 5} ) def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = '''y = add_two(x)''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result is None assert "tried to execute add_two" in out.out def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = '''x = 3''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3} ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Dict ) ->str: snake_case_ = '''x = 3\ny = 5''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 5} ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''text = f\'This is x: {x}.\'''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''text''': '''This is x: 3.'''} ) def snake_case__( self : Optional[Any] ) ->List[str]: snake_case_ = '''if x <= 3:\n y = 2\nelse:\n y = 5''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 2} ) snake_case_ = {'''x''': 8} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 8, '''y''': 5} ) def snake_case__( self : str ) ->str: snake_case_ = '''test_list = [x, add_two(x)]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , [3, 5] ) self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = '''y = x''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {} , state=_UpperCamelCase ) assert result == 3 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''y''': 3} ) def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''test_list = [x, add_two(x)]\ntest_list[1]''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_list''': [3, 5]} ) snake_case_ = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' snake_case_ = {'''x''': 3} snake_case_ = evaluate(_UpperCamelCase , {'''add_two''': add_two} , state=_UpperCamelCase ) assert result == 5 self.assertDictEqual(_UpperCamelCase , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def snake_case__( self : Optional[Any] ) ->int: snake_case_ = '''x = 0\nfor i in range(3):\n x = i''' snake_case_ = {} snake_case_ = evaluate(_UpperCamelCase , {'''range''': range} , state=_UpperCamelCase ) assert result == 2 self.assertDictEqual(_UpperCamelCase , {'''x''': 2, '''i''': 2} )
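# Standalone sketch of the interpreter exercised by the tests above: `evaluate`
# runs a restricted subset of Python against a whitelist of tools and a state
# dict (this mirrors the test cases rather than documenting the full API).
#
# from transformers.tools.python_interpreter import evaluate
#
# state = {"x": 3}
# result = evaluate("y = add_two(x)", {"add_two": lambda n: n + 2}, state=state)
# print(result)  # 5 -- the value of the last assignment
# print(state)   # {'x': 3, 'y': 5}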
def is_palindrome_number(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards in base 10."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
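# A handful of illustrative checks for the digit-reversal test:
# assert is_palindrome_number(121) is True
# assert is_palindrome_number(-121) is False  # the sign breaks the symmetry
# assert is_palindrome_number(10) is False
# assert is_palindrome_number(0) is True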
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) ->List[Any]: return f'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCamelCase ) for s in shape] )}.npy''' def snake_case__( self : Any ) ->List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case__( self : int , _UpperCamelCase : Union[str, Any]=0 , _UpperCamelCase : int=(4, 4, 6_4, 6_4) , _UpperCamelCase : Optional[int]=False ) ->Tuple: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return image def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ) ->Optional[Any]: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = '''bf16''' if fpaa else None snake_case_, snake_case_ = FlaxUNetaDConditionModel.from_pretrained( _UpperCamelCase , subfolder='''unet''' , dtype=_UpperCamelCase , revision=_UpperCamelCase ) return model, params def snake_case__( self : Dict , _UpperCamelCase : List[Any]=0 , _UpperCamelCase : Tuple=(4, 7_7, 7_6_8) , _UpperCamelCase : List[Any]=False ) ->int: snake_case_ = jnp.bfloataa if fpaa else jnp.floataa snake_case_ = jnp.array(load_hf_numpy(self.get_file_format(_UpperCamelCase , _UpperCamelCase ) ) , dtype=_UpperCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) ->Union[str, Any]: snake_case_, snake_case_ = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def 
snake_case__( self : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) ->Dict: snake_case_, snake_case_ = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=_UpperCamelCase ) snake_case_ = self.get_latents(_UpperCamelCase , shape=(4, 4, 9_6, 9_6) , fpaa=_UpperCamelCase ) snake_case_ = self.get_encoder_hidden_states(_UpperCamelCase , shape=(4, 7_7, 1_0_2_4) , fpaa=_UpperCamelCase ) snake_case_ = model.apply( {'''params''': params} , _UpperCamelCase , jnp.array(_UpperCamelCase , dtype=jnp.intaa ) , encoder_hidden_states=_UpperCamelCase , ).sample assert sample.shape == latents.shape snake_case_ = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) snake_case_ = jnp.array(_UpperCamelCase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-2 )
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class snake_case_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=1_3 , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : int=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : int=True , _UpperCamelCase : str=True , _UpperCamelCase : str=9_9 , _UpperCamelCase : int=3_2 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Dict=3_7 , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : int=0.1 , _UpperCamelCase : int=0.1 , _UpperCamelCase : List[str]=5_1_2 , _UpperCamelCase : Dict=1_6 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : Tuple=0.02 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[int]="None" , _UpperCamelCase : List[Any]=3 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Dict=None , ) ->Any: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = relative_attention snake_case_ = position_biased_input snake_case_ = pos_att_type snake_case_ = scope def snake_case__( self : Dict ) ->Optional[int]: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ = None if self.use_token_type_ids: snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_UpperCamelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
snake_case__( self : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) ->List[Any]: snake_case_ = TFDebertaVaModel(config=_UpperCamelCase ) snake_case_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} snake_case_ = [input_ids, input_mask] snake_case_ = model(_UpperCamelCase ) snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) ->Optional[int]: snake_case_ = TFDebertaVaForMaskedLM(config=_UpperCamelCase ) snake_case_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__( self : Any , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Dict ) ->str: snake_case_ = self.num_labels snake_case_ = TFDebertaVaForSequenceClassification(config=_UpperCamelCase ) snake_case_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__( self : Any , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.num_labels snake_case_ = TFDebertaVaForTokenClassification(config=_UpperCamelCase ) snake_case_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__( self : int , _UpperCamelCase : str , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] ) ->List[str]: snake_case_ = TFDebertaVaForQuestionAnswering(config=_UpperCamelCase ) snake_case_ = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } snake_case_ = model(_UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case__( self : Any ) ->Tuple: snake_case_ = self.prepare_config_and_inputs() ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) = config_and_inputs snake_case_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class snake_case_ ( __A , __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = ( ( 
TFDebertaVaModel, TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE : Any = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE : Any = False SCREAMING_SNAKE_CASE : int = False def snake_case__( self : Union[str, Any] ) ->int: snake_case_ = TFDebertaVaModelTester(self ) snake_case_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 ) def snake_case__( self : str ) ->Dict: self.config_tester.run_common_tests() def snake_case__( self : Optional[int] ) ->List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase ) def snake_case__( self : Union[str, Any] ) ->Tuple: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase ) def snake_case__( self : Tuple ) ->int: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase ) def snake_case__( self : Any ) ->Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase ) @slow def snake_case__( self : Any ) ->List[str]: snake_case_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) self.assertIsNotNone(_UpperCamelCase ) @require_tf class snake_case_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='''Model not available yet''' ) def snake_case__( self : Tuple ) ->int: pass @slow def snake_case__( self : Union[str, Any] ) ->Dict: snake_case_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' ) snake_case_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) snake_case_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) snake_case_ = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0] snake_case_ = tf.constant( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) tf.debugging.assert_near(output[:, 1:4, 1:4] , _UpperCamelCase , atol=1e-4 )
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """Set the passed objects to None and empty the device cache, returning the cleared slots."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Heuristically decide whether an exception is an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that retries `function`, halving its first (batch size) argument on every OOM."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorated function must accept the batch size first.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
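# Minimal usage sketch for the decorator above. If this module is accelerate's
# memory utilities, the public import path is `accelerate.utils`; the failing
# predicate is a stand-in for a real CUDA OOM on a given machine.
#
# from accelerate.utils import find_executable_batch_size
#
# @find_executable_batch_size(starting_batch_size=128)
# def train(batch_size):
#     if batch_size > 16:  # stand-in for an OOM (assumption)
#         raise RuntimeError("CUDA out of memory.")
#     return batch_size
#
# print(train())  # 16 -- the first size that "fits" (128 -> 64 -> 32 -> 16)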
8
1
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCAmelCase_ = '''hf-internal-testing/tiny-random-bert''' lowerCAmelCase_ = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') lowerCAmelCase_ = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Any ) ->List[Any]: snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(_UpperCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase ) ) ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) self.assertTrue(os.path.isfile(_UpperCamelCase ) ) # File is cached at the same place the second time. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) # Using a specific revision to test the full commit hash. snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''9b8c223''' ) self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) ) def snake_case__( self : Tuple ) ->Optional[int]: with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): snake_case_ = cached_file('''tiny-random-bert''' , _UpperCamelCase ) with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): snake_case_ = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) def snake_case__( self : Optional[int] ) ->int: with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ): snake_case_ = cached_file(_UpperCamelCase , '''conf''' ) with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f: snake_case_ = f.read() self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , '''.no_exist''' , _UpperCamelCase , '''conf''' ) ) ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = cached_file(_UpperCamelCase , '''conf''' , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) snake_case_ = mock.Mock() snake_case_ = 5_0_0 snake_case_ = {} snake_case_ = HTTPError snake_case_ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head: snake_case_ = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCamelCase ) self.assertIsNone(_UpperCamelCase ) # This check we did call the fake head request mock_head.assert_called() def snake_case__( self : Dict ) ->Optional[int]: self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) ) def snake_case__( self : Optional[int] ) ->str: # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , _UpperCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , _UpperCamelCase , revision='''ahaha''' ) snake_case_ = get_file_from_repo('''bert-base-cased''' , _UpperCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. snake_case_ = json.loads(open(_UpperCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 7_6_8 ) def snake_case__( self : Optional[Any] ) ->Any: with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = Path(_UpperCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(_UpperCamelCase , '''a.txt''' ) , str(_UpperCamelCase ) ) self.assertIsNone(get_file_from_repo(_UpperCamelCase , '''b.txt''' ) )
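# A minimal usage sketch of the hub helpers exercised by the tests above
# (the repo names appear in the tests; `get_file_from_repo` returns None for a
# missing file, while `cached_file` raises unless the `_raise_exceptions_*`
# flags are disabled):
from transformers.utils import cached_file, get_file_from_repo

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
maybe_missing = get_file_from_repo("bert-base-cased", "ahah.txt")  # None if absent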
8
from __future__ import annotations


def encode(plain: str) -> list[int]:
    # Map each lowercase letter to its 1-26 alphabet position.
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Map each 1-26 alphabet position back to its lowercase letter.
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
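# A quick round-trip check for the A1Z26 helpers above (a minimal sketch; the
# names follow the reconstruction of the obfuscated originals):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"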
8
1
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Dict , _UpperCamelCase : str ) ->List[str]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ): snake_case_ = model_result['''result'''][batch_size][sequence_length] self.assertIsNotNone(_UpperCamelCase ) def snake_case__( self : Dict ) ->Any: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : Dict ) ->Any: snake_case_ = '''sgugger/tiny-distilbert-classification''' snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , only_pretrain_model=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : Optional[Any] ) ->Tuple: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : Dict ) ->int: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = AutoConfig.from_pretrained(_UpperCamelCase ) snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase , [config] ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : List[str] ) ->Union[str, Any]: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = AutoConfig.from_pretrained(_UpperCamelCase ) snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase , [config] ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : Tuple ) ->List[Any]: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , 
training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__( self : List[str] ) ->Optional[Any]: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = AutoConfig.from_pretrained(_UpperCamelCase ) snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase , [config] ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__( self : Union[str, Any] ) ->Any: snake_case_ = '''patrickvonplaten/t5-tiny-random''' snake_case_ = AutoConfig.from_pretrained(_UpperCamelCase ) snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase , configs=[config] ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' ) def snake_case__( self : Optional[int] ) ->Optional[int]: snake_case_ = '''sshleifer/tiny-gpt2''' snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_UpperCamelCase , inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_UpperCamelCase , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__( self : str ) ->Tuple: snake_case_ = '''sshleifer/tiny-gpt2''' with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_UpperCamelCase , save_to_csv=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCamelCase , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(_UpperCamelCase , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(_UpperCamelCase , '''env.csv''' ) , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(_UpperCamelCase , '''inf_time.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_UpperCamelCase , '''inf_mem.csv''' ) ).exists() ) self.assertTrue(Path(os.path.join(_UpperCamelCase , '''env.csv''' ) ).exists() ) def snake_case__( self : List[str] ) ->Union[str, Any]: snake_case_ = '''sshleifer/tiny-gpt2''' def _check_summary_is_not_empty(_UpperCamelCase : int ): self.assertTrue(hasattr(_UpperCamelCase , '''sequential''' ) ) self.assertTrue(hasattr(_UpperCamelCase , '''cumulative''' ) ) self.assertTrue(hasattr(_UpperCamelCase , '''current''' ) ) self.assertTrue(hasattr(_UpperCamelCase , '''total''' ) ) with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ = TensorFlowBenchmarkArguments( models=[MODEL_ID] , 
inference=_UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCamelCase , '''log.txt''' ) , log_print=_UpperCamelCase , trace_memory_line_by_line=_UpperCamelCase , eager_mode=_UpperCamelCase , multi_process=_UpperCamelCase , ) snake_case_ = TensorFlowBenchmark(_UpperCamelCase ) snake_case_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(_UpperCamelCase , '''log.txt''' ) ).exists() )
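# A minimal usage sketch of the benchmark API exercised above (the class names
# come straight from the imports in this module; the argument values here are
# illustrative, since the tests pass obfuscated booleans):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)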
8
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Read two base/power pairs from input and typecast them to int using map.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
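# Worked example for the log-based comparison above (names follow the
# reconstruction): compare 2^10 with 10^3.
assert res(2, 10) > res(10, 3)  # 10*log10(2) ≈ 3.0103 > 3.0, so 1024 > 1000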
8
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), } } lowerCAmelCase_ = { '''google/bigbird-roberta-base''': 40_96, '''google/bigbird-roberta-large''': 40_96, '''google/bigbird-base-trivia-itc''': 40_96, } class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE : List[Any] = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE : List[int] = [] def __init__( self : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Dict="<unk>" , _UpperCamelCase : List[str]="<s>" , _UpperCamelCase : Tuple="</s>" , _UpperCamelCase : Any="<pad>" , _UpperCamelCase : Any="[SEP]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) ->None: snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else bos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else eos_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else unk_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else pad_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else cls_token snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , sep_token=_UpperCamelCase , mask_token=_UpperCamelCase , cls_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) snake_case_ = vocab_file snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCamelCase ) @property def snake_case__( self : str ) ->List[Any]: return self.sp_model.get_piece_size() def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) ->Any: snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : str , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__( self : Optional[int] , _UpperCamelCase : str ) ->List[str]: return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : List[str] ) ->Tuple: return self.sp_model.piece_to_id(_UpperCamelCase ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : str ) ->List[Any]: snake_case_ = self.sp_model.IdToPiece(_UpperCamelCase ) return token def snake_case__( self : Dict , _UpperCamelCase : Optional[int] ) ->List[str]: snake_case_ = [] snake_case_ = '''''' snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_UpperCamelCase ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(_UpperCamelCase ) snake_case_ = False out_string += self.sp_model.decode(_UpperCamelCase ) return out_string.strip() def snake_case__( self : int , _UpperCamelCase : List[int] , _UpperCamelCase : bool = False , _UpperCamelCase : bool = None , _UpperCamelCase : bool = True , **_UpperCamelCase : List[str] , ) ->str: snake_case_ = kwargs.pop('''use_source_tokenizer''' , _UpperCamelCase ) snake_case_ = self.convert_ids_to_tokens(_UpperCamelCase , skip_special_tokens=_UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 snake_case_ = [] snake_case_ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) snake_case_ = [] sub_texts.append(_UpperCamelCase ) else: current_sub_text.append(_UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: snake_case_ = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_UpperCamelCase ) ) else: snake_case_ = ''''''.join(_UpperCamelCase ) snake_case_ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: snake_case_ = self.clean_up_tokenization(_UpperCamelCase ) return clean_text else: return text def snake_case__( self : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) ->Tuple[str]: if not os.path.isdir(_UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ = os.path.join( _UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , '''wb''' ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def snake_case__( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def snake_case__( self : List[str] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) ->List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCamelCase )) + [1] return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def snake_case__( self : List[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) ->List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
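# A minimal usage sketch for the SentencePiece tokenizer above; the obfuscated
# class name corresponds to `BigBirdTokenizer` (the pretrained vocab maps in
# this module point at the BigBird checkpoints):
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tokenizer("Hello world!").input_ids          # [CLS] ... [SEP] wrapped ids
text = tokenizer.decode(ids, skip_special_tokens=True)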
8
1
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , _UpperCamelCase : Tuple , _UpperCamelCase : int=7 , _UpperCamelCase : Optional[int]=3 , _UpperCamelCase : List[Any]=1_8 , _UpperCamelCase : List[str]=3_0 , _UpperCamelCase : Optional[int]=4_0_0 , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : List[str]=None , _UpperCamelCase : int=True , ) ->Optional[Any]: snake_case_ = size if size is not None else {'''height''': 1_8, '''width''': 1_8} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = image_size snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize snake_case_ = size snake_case_ = apply_ocr def snake_case__( self : List[str] ) ->str: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None def snake_case__( self : Any ) ->int: snake_case_ = LayoutLMvaImageProcessingTester(self ) @property def snake_case__( self : List[Any] ) ->Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def snake_case__( self : Optional[int] ) ->Tuple: snake_case_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_UpperCamelCase , '''size''' ) ) self.assertTrue(hasattr(_UpperCamelCase , '''apply_ocr''' ) ) def snake_case__( self : Dict ) ->Tuple: snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} ) snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 ) self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} ) def snake_case__( self : Dict ) ->List[str]: pass def snake_case__( self : Optional[int] ) ->List[str]: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , _UpperCamelCase ) self.assertIsInstance(encoding.boxes , _UpperCamelCase ) # Test batched snake_case_ = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def snake_case__( self : str ) 
->Tuple: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , np.ndarray ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case_ = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def snake_case__( self : Union[str, Any] ) ->List[str]: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , torch.Tensor ) # Test not batched input snake_case_ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched snake_case_ = image_processing(_UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def snake_case__( self : Optional[int] ) ->str: # with apply_OCR = True snake_case_ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) snake_case_ = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) snake_case_ = image_processing(_UpperCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', 
'''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 snake_case_ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], 
[4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _UpperCamelCase ) self.assertListEqual(encoding.boxes , _UpperCamelCase ) # with apply_OCR = False snake_case_ = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ = image_processing(_UpperCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
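# A minimal usage sketch of the image processor tested above; the obfuscated
# name `LayoutLMvaImageProcessor` corresponds to `LayoutLMv3ImageProcessor`:
from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip Tesseract OCR
image = Image.new("RGB", (640, 480))                   # placeholder input
encoding = processor(image, return_tensors="pt")       # pixel_values: 1x3x224x224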
8
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    # Incremental (lazy) sieve: map each known composite to one of its prime
    # factors, advancing the factor to the next multiple as we pass it.
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
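# Quick check of the incremental sieve above (names follow the reconstruction):
gen = sieve()
assert [next(gen) for _ in range(6)] == [2, 3, 5, 7, 11, 13]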
8
1
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowerCAmelCase_ = logging.getLogger(__name__) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ): snake_case_ = bnb_quantization_config.load_in_abit snake_case_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) snake_case_ = [] # custom device map if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(device_map.keys() ) > 1: snake_case_ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case_ = get_keys_to_not_convert(SCREAMING_SNAKE_CASE__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE__ ) snake_case_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case_ = [] snake_case_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(SCREAMING_SNAKE_CASE__ ) # compatibility with peft snake_case_ = load_in_abit snake_case_ = load_in_abit snake_case_ = get_parameter_device(SCREAMING_SNAKE_CASE__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) snake_case_ = replace_with_bnb_layers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) # convert param to the right dtype snake_case_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case_ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(SCREAMING_SNAKE_CASE__ ): param.to(SCREAMING_SNAKE_CASE__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): snake_case_ = replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) snake_case_ = get_quantized_model_device_map( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_memory=SCREAMING_SNAKE_CASE__ , no_split_module_classes=SCREAMING_SNAKE_CASE__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case_ = True snake_case_ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE__ , offload_state_dict=SCREAMING_SNAKE_CASE__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , offload_dir=SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): if device_map is None: if torch.cuda.is_available(): snake_case_ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) snake_case_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case_ = {} snake_case_ = special_dtypes snake_case_ = no_split_module_classes snake_case_ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case_ = get_balanced_memory( SCREAMING_SNAKE_CASE__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) snake_case_ = max_memory snake_case_ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # check if don't have any quantized module on the cpu snake_case_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): if modules_to_not_convert is None: snake_case_ = [] snake_case_, snake_case_ = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ): snake_case_ = False for name, module in model.named_children(): if current_key_name is None: snake_case_ = [] current_key_name.append(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case_ = '''.'''.join(SCREAMING_SNAKE_CASE__ ) snake_case_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: snake_case_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case_ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) snake_case_ = module.weight.data if module.bias is not None: snake_case_ = module.bias.data bnb_module.requires_grad_(SCREAMING_SNAKE_CASE__ ) setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = True if len(list(module.children() ) ) > 0: snake_case_, snake_case_ = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # Create a copy of the model with init_empty_weights(): snake_case_ = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case_ = find_tied_parameters(SCREAMING_SNAKE_CASE__ ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case_ = sum(SCREAMING_SNAKE_CASE__ , [] ) snake_case_ = len(SCREAMING_SNAKE_CASE__ ) > 0 # Check if it is a base model snake_case_ = False if hasattr(SCREAMING_SNAKE_CASE__ , '''base_model_prefix''' ): snake_case_ = 
not hasattr(SCREAMING_SNAKE_CASE__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case_ = list(model.named_children() ) snake_case_ = [list_modules[-1][0]] # add last module together with tied weights snake_case_ = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) snake_case_ = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ ) # remove ".weight" from the keys snake_case_ = ['''.weight''', '''.bias'''] snake_case_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case_ = name.replace(SCREAMING_SNAKE_CASE__ , '''''' ) filtered_module_names.append(SCREAMING_SNAKE_CASE__ ) return filtered_module_names def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): for m in model.modules(): if isinstance(SCREAMING_SNAKE_CASE__ , bnb.nn.Linearabit ): return True return False def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): return next(parameter.parameters() ).device def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 , dtype=SCREAMING_SNAKE_CASE__ , value=SCREAMING_SNAKE_CASE__ ) snake_case_ = param_name snake_case_ = model if "." in tensor_name: snake_case_ = tensor_name.split('''.''' ) for split in splits[:-1]: snake_case_ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) snake_case_ = new_module snake_case_ = splits[-1] # offload weights snake_case_ = False offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , ) else: offload_weight(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) offload_weight(SCREAMING_SNAKE_CASE__ , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , '''meta''' , dtype=SCREAMING_SNAKE_CASE__ , value=torch.empty(*param.size() ) )
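# A minimal usage sketch for the quantization entry point above. The names in
# this module are obfuscated; this assumes the public API it mirrors,
# `accelerate.utils.load_and_quantize_model` / `BnbQuantizationConfig`
# (the checkpoint path and model class below are hypothetical):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel(config)           # hypothetical model class
#     model = load_and_quantize_model(
#         empty_model,
#         weights_location="path/to/checkpoint",  # hypothetical path
#         bnb_quantization_config=bnb_config,
#         device_map="auto",
#     )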
8
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
8
1
import json
import os
import tempfile

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # the key encodes the function name and its kwargs
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
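A minimal, hypothetical usage sketch of the tool above. It assumes the class is importable as TextSummarizationTool and that calling the tool triggers its lazy setup (downloading the checkpoint), which is how PipelineTool subclasses are normally invoked; treat the call pattern as an assumption, not part of this file.

# Hypothetical usage sketch; requires network access to fetch the checkpoint.
tool = TextSummarizationTool()
summary = tool("A long English text that should be condensed into a few sentences ...")
print(summary)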
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    # Modular exponentiation by repeated squaring.
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Last `digits` digits of the power tower base^base^...^base of the given height.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
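A quick sanity check for the helper above: _modexpt should agree with Python's built-in three-argument pow, which computes the same modular exponentiation.

# 3**10 = 59049, so both should return 49 modulo 1000.
assert _modexpt(3, 10, 1000) == pow(3, 10, 1000) == 49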
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque, so collisions chain values at the same key.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
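A hypothetical usage sketch, assuming the parent HashTable class (from the same package) exposes a constructor HashTable(size_table, charge_factor=None, lim_charge=None) and an insert_data method; both names are assumptions based on the relative import, not shown in this file.

# Hypothetical usage; insert_data and the constructor signature are assumed.
ht = HashTableWithLinkedList(5, charge_factor=2)
for value in (10, 20, 30):
    ht.insert_data(value)
print(ht.values)  # slots now hold deques of chained values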
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create a solution object to save the path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1

            # check all four directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0  # backtrack
            return False

    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
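A small example run of the backtracking solver above, using the usual convention that 0 marks an open cell and 1 marks a wall; a path from (0, 0) to (3, 3) exists here.

maze = [
    [0, 1, 0, 0],
    [0, 0, 0, 1],
    [1, 0, 1, 0],
    [1, 0, 0, 0],
]
solve_maze(maze)  # prints the 0/1 solution matrix marking the found path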
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
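A quick check of the leading digits against math.pi; since the float constant is only exact to about 15 digits, the comparison is kept short.

import math

assert pi(10) == str(math.pi)[:10]  # "3.14159265"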
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed",
            "wa", "un", "runn", "##ing", ",", "low", "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of the current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
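A quick check that the two generators above agree, since the optimized version only computes half of each row and mirrors it.

assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5)
print_pascal_triangle(5)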
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
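A small sanity check on the prime helpers above: the sixth prime is 13.

assert is_prime(13) and not is_prime(15)
assert solution(6) == 13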
# Size of the alphabet used as the base of the rolling hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
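A short usage example of the matcher above; the rolling hash means each window shift is an O(1) update rather than rehashing the whole window.

print(rabin_karp("abra", "abracadabra"))  # True
print(rabin_karp("cab", "abracadabra"))   # False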
from sklearn.metrics import mean_squared_error

import datasets

_CITATION = """\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""

_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.

        "raw_values" : Returns a full set of errors in case of multioutput input.

        "uniform_average" : Errors of all outputs are averaged with uniform weight.

    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.
Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {'mse': 0.6123724356957945}

    If you're using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'mse': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {'mse': array([0.41666667, 1. ])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by rotating the head element."""
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by in-place swapping (backtracking)."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output: list[list[int]] = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
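A cross-check of the backtracking version against the standard library; both should produce the same set of permutations, just possibly in a different order.

from itertools import permutations

assert sorted(permute2([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))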
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batched inputs passed as a list of {"image": ..., "question": ...} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the series."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
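A one-line cross-check of the same definition (product of 13 adjacent digits) using math.prod over sliding windows; it assumes N and solution from the block above are in scope.

from math import prod

assert solution() == max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))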
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
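A worked example of the formula above, f = 1 / (2π√(LC)): for L = 10 mH and C = 5 µF, √(LC) ≈ 2.236e-4, so f ≈ 711.8 Hz.

label, freq = resonant_frequency(10e-3, 5e-6)
print(label, f"{freq:.1f} Hz")  # Resonant frequency 711.8 Hz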
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
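A short usage example showing the wrap-around behaviour: after two dequeues, a new enqueue reuses the freed slot at the start of the backing list.

q = CircularQueue(3)
q.enqueue("a").enqueue("b").enqueue("c")
print(q.dequeue(), q.dequeue())  # a b
q.enqueue("d")  # rear wraps around into the freed slot
print(len(q), q.first())  # 2 c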
8
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscripting(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
8
1
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # Model.fit accepts generators directly (fit_generator was removed in newer TF)
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
8
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
8
1
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
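# --- Usage sketch (illustrative) ---
# The same routine finds a root of any smooth function given two nearby
# starting points; here the cube root of 2 via g(x) = x**3 - 2 (root ~1.26).
def g(x: float) -> float:
    return x**3 - 2


print(intersection(g, 1.0, 1.5))  # ~1.2599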
8
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
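# --- Usage sketch (illustrative) ---
# The decorated function must take the batch size as its first argument and
# must NOT be passed one by the caller; on an OOM-like error the wrapper
# retries at half the size. `train` and `model` here are hypothetical names.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size, model):
    print(f"trying batch_size={batch_size}")
    ...  # build dataloaders and run the training loop here


# train(my_model)  # called without a batch size; the decorator supplies it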
8
1