Dataset schema:

| Column | Type | Range |
|---|---|---|
| `code` | string | length 81 to 54k |
| `code_codestyle` | int64 | 0 to 721 |
| `style_context` | string | length 91 to 41.9k |
| `style_context_codestyle` | int64 | 0 to 699 |
| `label` | int64 | 0 to 1 |

Each record below appears to follow the columns in this order: `code`, `code_codestyle`, `style_context`, `style_context_codestyle`, `label`.
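A minimal sketch of reading rows with this schema, assuming the records are stored as Parquet files readable by the `datasets` library (the data path below is hypothetical; the dump does not name the dataset):

```python
# Minimal sketch, assuming the rows live in local Parquet files.
# "data/*.parquet" is a hypothetical path; adjust to the real location.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/*.parquet", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # `code` and `style_context` hold raw Python source
```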
'''simple docstring''' import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput a_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __SCREAMING_SNAKE_CASE ( snake_case__ ): def __init__( self : List[str] , *__lowercase : Any , __lowercase : str=None , __lowercase : int=None , __lowercase : Tuple=None , **__lowercase : Tuple ) -> str: super().__init__(*lowercase_ , **lowercase_ ) SCREAMING_SNAKE_CASE__ : List[str] =eval_examples SCREAMING_SNAKE_CASE__ : str =post_process_function SCREAMING_SNAKE_CASE__ : int =quant_trainer_args SCREAMING_SNAKE_CASE__ : Optional[int] =1_28 # default number of calibration samples def __magic_name__ ( self : Any , __lowercase : Union[str, Any]=None ) -> Tuple: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =calib_dataset if calib_dataset is not None else self.calib_dataset SCREAMING_SNAKE_CASE__ : str =self._remove_unused_columns(lowercase_ , description='''Calibration''' ) return DataLoader( lowercase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowercase_ , ) def __magic_name__ ( self : List[Any] , __lowercase : Optional[int]=None ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.train_dataset if calib_dataset is None else calib_dataset SCREAMING_SNAKE_CASE__ : str =self.get_calib_dataloader(lowercase_ ) SCREAMING_SNAKE_CASE__ : Any =self.model quant_trainer.configure_model(lowercase_ , self.quant_trainer_args , calib=lowercase_ ) model.eval() quant_trainer.enable_calibration(lowercase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F" Num examples = {self.calib_num}" ) logger.info(F" Batch size = {calib_dataloader.batch_size}" ) for step, inputs in enumerate(lowercase_ ): # Prediction step SCREAMING_SNAKE_CASE__ : Optional[Any] =self.prediction_step(lowercase_ , lowercase_ , prediction_loss_only=lowercase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(lowercase_ , self.quant_trainer_args ) SCREAMING_SNAKE_CASE__ : Optional[int] =model def __magic_name__ ( self : Any , __lowercase : Dict=None , __lowercase : Optional[int]=None , __lowercase : Optional[Any]=None , __lowercase : Optional[int] = "eval" ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =self.eval_dataset if eval_dataset is None else eval_dataset SCREAMING_SNAKE_CASE__ : Any =self.get_eval_dataloader(lowercase_ ) SCREAMING_SNAKE_CASE__ : int =self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. 
SCREAMING_SNAKE_CASE__ : List[str] =self.compute_metrics SCREAMING_SNAKE_CASE__ : str =None SCREAMING_SNAKE_CASE__ : Optional[int] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE__ : Any =eval_loop( lowercase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , ) finally: SCREAMING_SNAKE_CASE__ : Any =compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: SCREAMING_SNAKE_CASE__ : Tuple =self.post_process_function(lowercase_ , lowercase_ , output.predictions ) SCREAMING_SNAKE_CASE__ : Any =self.compute_metrics(lowercase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): SCREAMING_SNAKE_CASE__ : Dict =metrics.pop(lowercase_ ) self.log(lowercase_ ) else: SCREAMING_SNAKE_CASE__ : List[Any] ={} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) SCREAMING_SNAKE_CASE__ : int =self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ ) return metrics def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : Optional[int] , __lowercase : Tuple=None , __lowercase : Union[str, Any] = "test" ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.get_test_dataloader(lowercase_ ) # Temporarily disable metric computation, we will do it in the loop here. SCREAMING_SNAKE_CASE__ : Any =self.compute_metrics SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Any =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: SCREAMING_SNAKE_CASE__ : int =eval_loop( lowercase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , ) finally: SCREAMING_SNAKE_CASE__ : Dict =compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output SCREAMING_SNAKE_CASE__ : List[str] =self.post_process_function(lowercase_ , lowercase_ , output.predictions , '''predict''' ) SCREAMING_SNAKE_CASE__ : Any =self.compute_metrics(lowercase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"{metric_key_prefix}_" ): SCREAMING_SNAKE_CASE__ : int =metrics.pop(lowercase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ ) def __magic_name__ ( self : Dict , __lowercase : Optional[int]="./" ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int =self.eval_dataset SCREAMING_SNAKE_CASE__ : Any =self.get_eval_dataloader(lowercase_ ) SCREAMING_SNAKE_CASE__ : int =next(iter(lowercase_ ) ) # saving device - to make it consistent SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple SCREAMING_SNAKE_CASE__ : str =tuple(v.to(lowercase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn import TensorQuantizer SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model.to(lowercase_ ) model.eval() model.float() SCREAMING_SNAKE_CASE__ : Optional[Any] =model.module if hasattr(lowercase_ , '''module''' ) else model quant_trainer.configure_model(lowercase_ , self.quant_trainer_args ) 
SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(lowercase_ , '''model.onnx''' ) logger.info(F"exporting model to {output_model_file}" ) SCREAMING_SNAKE_CASE__ : Dict ={0: "batch_size", 1: "seq_len"} torch.onnx.export( lowercase_ , lowercase_ , lowercase_ , export_params=lowercase_ , opset_version=13 , do_constant_folding=lowercase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=lowercase_ , ) logger.info('''onnx export finished''' )
code_codestyle: 704
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a_ = False a_ = False def _a( UpperCamelCase__ : Namespace ): '''simple docstring''' return TrainCommand(UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @staticmethod def __magic_name__ ( __lowercase : ArgumentParser ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' ) SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =args.output SCREAMING_SNAKE_CASE__ : str =args.column_label SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text SCREAMING_SNAKE_CASE__ : Tuple =args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": SCREAMING_SNAKE_CASE__ : List[str] 
=TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from {args.train_data}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon def __magic_name__ ( self : Any ) -> str: if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : Optional[int] ) -> Tuple: raise NotImplementedError def __magic_name__ ( self : Dict ) -> List[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
style_context_codestyle: 665
label: 0
# Note: the flattened source named every helper `_a`, so later definitions
# shadowed earlier ones; descriptive names are restored here.


def set_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 1."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Set the bit at `position` of `number` to 0."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip the bit at `position` of `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` of `number` is 1."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at `position` of `number`."""
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
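A quick check of the helpers above, with values shown in binary (the assertions are illustrative, not part of the original sample):

```python
assert set_bit(0b1101, 1) == 0b1111    # 13 -> 15: bit 1 turned on
assert clear_bit(0b1111, 2) == 0b1011  # 15 -> 11: bit 2 turned off
assert flip_bit(0b1101, 0) == 0b1100   # 13 -> 12: bit 0 toggled
assert is_bit_set(0b1010, 3) is True   # bit 3 of 10 is set
assert get_bit(0b1010, 0) == 0         # bit 0 of 10 is clear
```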
code_codestyle: 705
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
style_context_codestyle: 665
label: 0
"""BEiT model configuration."""
# Class and parameter names restored from the assignment targets and
# usage visible in the flattened source.

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
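A short usage sketch of the configuration above; here it is imported from the installed `transformers` package rather than through the relative paths in the sample:

```python
from transformers import BeitConfig

config = BeitConfig(image_size=384)          # override a single default
print(config.model_type)                     # beit
print(config.vocab_size, config.image_size)  # 8192 384
```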
code_codestyle: 706
# Note: class and method names (LRUCache, refer, display) are taken from the
# usage block at the bottom of the flattened sample.

from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to `x`, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print the cache contents, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
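One caveat with the deque-based design above: `deque.remove` is O(n), so every repeated reference costs a linear scan. A minimal sketch of the same `refer`/`display` interface over `collections.OrderedDict`, whose `move_to_end` and `popitem` are O(1) (the class name here is hypothetical, not from the sample):

```python
from collections import OrderedDict


class OrderedDictLRU:
    """Same observable behaviour as LRUCache, with O(1) updates."""

    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self._store: OrderedDict = OrderedDict()  # least -> most recently used

    def refer(self, key) -> None:
        if key in self._store:
            self._store.move_to_end(key)     # mark as most recently used
        elif len(self._store) == self.capacity:
            self._store.popitem(last=False)  # evict least recently used
        self._store[key] = None

    def display(self) -> None:
        for key in reversed(self._store):    # most recently used first
            print(key)
```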
style_context_codestyle: 665
label: 0
# Note: class name and fsspec method names restored from the attribute usage
# visible in the flattened source (self._get_dirs, self._strip_protocol, ...).

from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(
                f"Open is only implemented for dataset repositories, but got {self.repo_info}"
            )
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
code_codestyle: 707
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a_ = list[list[float | int]] def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col] SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0] SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : Tuple =0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, UpperCamelCase__ ): for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col] for cola in range(UpperCamelCase__, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ ) ] def _a( UpperCamelCase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Dict =y_val SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ ) def interpolated_func(UpperCamelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCamelCase__ ) ) return interpolated_func def _a( UpperCamelCase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Any =1 while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ): x_val += 1 ret += poly(UpperCamelCase__ ) 
return ret if __name__ == "__main__": print(F'''{solution() = }''')
style_context_codestyle: 665
label: 0
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __SCREAMING_SNAKE_CASE ( A_ ): '''simple docstring''' def __init__( self : Tuple , __lowercase : List[Any] , __lowercase : List[str] = None , __lowercase : int = None , __lowercase : Tuple = None , __lowercase : Dict = False , __lowercase : Any = False , __lowercase : int = None , __lowercase : Optional[int] = None , **__lowercase : Union[str, Any] , ) -> int: super().__init__( __lowercase , split=__lowercase , features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , streaming=__lowercase , num_proc=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : Any =field SCREAMING_SNAKE_CASE__ : List[Any] =path_or_paths if isinstance(__lowercase , __lowercase ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE__ : Optional[Any] =Json( cache_dir=__lowercase , data_files=__lowercase , features=__lowercase , field=__lowercase , **__lowercase , ) def __magic_name__ ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE__ : Dict =None SCREAMING_SNAKE_CASE__ : str =None SCREAMING_SNAKE_CASE__ : int =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None self.builder.download_and_prepare( download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE__ : Any =self.builder.as_dataset( split=self.split , verification_mode=__lowercase , in_memory=self.keep_in_memory ) return dataset class __SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : Tuple , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Optional[int] = None , __lowercase : Optional[Any] = None , **__lowercase : Any , ) -> Dict: if num_proc is not None and num_proc <= 0: raise ValueError(F"num_proc {num_proc} must be an integer > 0." 
) SCREAMING_SNAKE_CASE__ : Optional[Any] =dataset SCREAMING_SNAKE_CASE__ : Union[str, Any] =path_or_buf SCREAMING_SNAKE_CASE__ : Optional[Any] =batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE SCREAMING_SNAKE_CASE__ : List[Any] =num_proc SCREAMING_SNAKE_CASE__ : List[str] ="""utf-8""" SCREAMING_SNAKE_CASE__ : int =to_json_kwargs def __magic_name__ ( self : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.to_json_kwargs.pop('''path_or_buf''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =self.to_json_kwargs.pop('''orient''' , '''records''' ) SCREAMING_SNAKE_CASE__ : List[str] =self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False ) SCREAMING_SNAKE_CASE__ : str =self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True ) SCREAMING_SNAKE_CASE__ : Tuple =self.to_json_kwargs.pop('''compression''' , __lowercase ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"`datasets` currently does not support {compression} compression" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , '''wb''' , compression=__lowercase ) as buffer: SCREAMING_SNAKE_CASE__ : str =self._write(file_obj=__lowercase , orient=__lowercase , lines=__lowercase , index=__lowercase , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"The compression parameter is not supported when writing to a buffer, but compression={compression}" ''' was passed. Please provide a local path instead.''' ) SCREAMING_SNAKE_CASE__ : str =self._write( file_obj=self.path_or_buf , orient=__lowercase , lines=__lowercase , index=__lowercase , **self.to_json_kwargs ) return written def __magic_name__ ( self : int , __lowercase : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =args SCREAMING_SNAKE_CASE__ : Optional[Any] =query_table( table=self.dataset.data , key=slice(__lowercase , offset + self.batch_size ) , indices=self.dataset._indices , ) SCREAMING_SNAKE_CASE__ : List[str] =batch.to_pandas().to_json( path_or_buf=__lowercase , orient=__lowercase , lines=__lowercase , index=__lowercase , **__lowercase ) if not json_str.endswith('''\n''' ): json_str += "\n" return json_str.encode(self.encoding ) def __magic_name__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : Tuple , **__lowercase : Optional[int] , ) -> List[str]: SCREAMING_SNAKE_CASE__ : List[str] =0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): SCREAMING_SNAKE_CASE__ : Optional[int] =self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__lowercase ) else: SCREAMING_SNAKE_CASE__ : int =len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __lowercase , __lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ): written += file_obj.write(__lowercase ) return written
code_codestyle: 708
# Note: function names restored from the call sites in the sample itself
# (interpolation_search, interpolation_search_by_recursion, __assert_sorted).


def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    """Return the index of `item` in `sorted_collection`, or None if absent."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    """Pure recursive variant; call with left=0 and right=len(collection)-1."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection: list[int]) -> bool:
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # defined unconditionally so the search below always has input
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
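A worked example of the probe formula above: for `collection = [10, 30, 40, 45, 50, 66, 77, 93]` and `item = 67`, the first probe is `point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 57 * 7 // 83 = 4`. The search inspects `collection[4] == 50 < 67` and continues in indices `[5, 7]`, jumping proportionally to the value gap rather than to the midpoint as binary search would; this is why interpolation search is fast on uniformly distributed keys.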
style_context_codestyle: 665
label: 0
# Note: the flattened source named the top-level validator `_a` and used an
# undefined `lowerCAmelCase__` for its parameter; a descriptive name and a
# consistent parameter are restored here.

from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Return True if `tree` is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
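A brief usage sketch of the validator above (the trees are illustrative):

```python
# Valid BST:    5        Invalid: node 2's right child is 6, which violates
#              / \       the upper bound of 5 inherited from the root.
#             2   7
valid = TreeNode(5, TreeNode(2), TreeNode(7))
invalid = TreeNode(5, TreeNode(2, right=TreeNode(6)), TreeNode(7))

print(is_binary_search_tree(valid))    # True
print(is_binary_search_tree(invalid))  # False
```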
code_codestyle: 709
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @require_tf def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, 
{'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @slow @require_torch def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : str =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
style_context_codestyle: 665
label: 0
'''simple docstring''' from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def _a( UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =int(number**0.5 ) return number == sq * sq def _a( UpperCamelCase__ : str, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den SCREAMING_SNAKE_CASE__ : Optional[int] =x_den * y_den * z_den SCREAMING_SNAKE_CASE__ : Union[str, Any] =gcd(UpperCamelCase__, UpperCamelCase__ ) top //= hcf bottom //= hcf return top, bottom def _a( UpperCamelCase__ : Any = 3_5 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =set() SCREAMING_SNAKE_CASE__ : str =4_2 SCREAMING_SNAKE_CASE__ : Optional[Any] =Fraction(0 ) SCREAMING_SNAKE_CASE__ : Dict =4_2 for x_num in range(1, order + 1 ): for x_den in range(x_num + 1, order + 1 ): for y_num in range(1, order + 1 ): for y_den in range(y_num + 1, order + 1 ): # n=1 SCREAMING_SNAKE_CASE__ : int =x_num * y_den + x_den * y_num SCREAMING_SNAKE_CASE__ : List[Any] =x_den * y_den SCREAMING_SNAKE_CASE__ : Optional[int] =gcd(UpperCamelCase__, UpperCamelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE__ : Dict =add_three( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) unique_s.add(UpperCamelCase__ ) # n=2 SCREAMING_SNAKE_CASE__ : Any =( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) SCREAMING_SNAKE_CASE__ : List[Any] =x_den * x_den * y_den * y_den if is_sq(UpperCamelCase__ ) and is_sq(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =int(sqrt(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : Tuple =int(sqrt(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : List[str] =gcd(UpperCamelCase__, UpperCamelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE__ : Optional[int] =add_three( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) unique_s.add(UpperCamelCase__ ) # n=-1 SCREAMING_SNAKE_CASE__ : Optional[int] =x_num * y_num SCREAMING_SNAKE_CASE__ : str =x_den * y_num + x_num * y_den SCREAMING_SNAKE_CASE__ : Optional[Any] =gcd(UpperCamelCase__, UpperCamelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE__ : List[Any] =add_three( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) unique_s.add(UpperCamelCase__ ) # n=2 SCREAMING_SNAKE_CASE__ : Tuple =x_num * x_num * y_num * y_num SCREAMING_SNAKE_CASE__ : Optional[Any] =( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(UpperCamelCase__ ) and is_sq(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : List[str] =int(sqrt(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] =int(sqrt(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE__ : int =gcd(UpperCamelCase__, UpperCamelCase__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: SCREAMING_SNAKE_CASE__ : List[str] =add_three( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) unique_s.add(UpperCamelCase__ ) for num, den in unique_s: total += Fraction(UpperCamelCase__, UpperCamelCase__ ) return total.denominator + total.numerator if __name__ 
== "__main__": print(F'''{solution() = }''')
code_codestyle: 710
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __magic_name__ ( self : Optional[int] ) -> str: import torch SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : str =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 
51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __magic_name__ ( self : Any ) -> List[str]: import torch SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
665
0
'''simple docstring''' from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline a_ = logging.get_logger(__name__) @add_end_docstrings(lowercase_ ) class __SCREAMING_SNAKE_CASE ( lowercase_ ): def __init__( self : int , **__lowercase : Any ) -> Dict: super().__init__(**UpperCamelCase__ ) if self.framework != "pt": raise ValueError(F"The {self.__class__} is only available in PyTorch." ) # No specific FOR_XXX available yet def __call__( self : Optional[int] , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : Any ) -> Optional[Any]: return super().__call__(UpperCamelCase__ , **UpperCamelCase__ ) def __magic_name__ ( self : int , **__lowercase : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Dict ={} if "candidate_labels" in kwargs: SCREAMING_SNAKE_CASE__ : List[Any] =kwargs['''candidate_labels'''] if "hypothesis_template" in kwargs: SCREAMING_SNAKE_CASE__ : Tuple =kwargs['''hypothesis_template'''] return preprocess_params, {}, {} def __magic_name__ ( self : Dict , __lowercase : Any , __lowercase : Dict=None , __lowercase : List[str]="This is a sound of {}." ) -> Optional[int]: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): if audio.startswith('''http://''' ) or audio.startswith('''https://''' ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png SCREAMING_SNAKE_CASE__ : List[Any] =requests.get(UpperCamelCase__ ).content else: with open(UpperCamelCase__ , '''rb''' ) as f: SCREAMING_SNAKE_CASE__ : Tuple =f.read() if isinstance(UpperCamelCase__ , UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Dict =ffmpeg_read(UpperCamelCase__ , self.feature_extractor.sampling_rate ) if not isinstance(UpperCamelCase__ , np.ndarray ): raise ValueError('''We expect a numpy ndarray as input''' ) if len(audio.shape ) != 1: raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' ) SCREAMING_SNAKE_CASE__ : List[str] =self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =candidate_labels SCREAMING_SNAKE_CASE__ : Optional[int] =[hypothesis_template.format(UpperCamelCase__ ) for x in candidate_labels] SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str =[text_inputs] return inputs def __magic_name__ ( self : int , __lowercase : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : int =model_inputs.pop('''candidate_labels''' ) SCREAMING_SNAKE_CASE__ : str =model_inputs.pop('''text_inputs''' ) if isinstance(text_inputs[0] , UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =text_inputs[0] else: # Batching case. 
SCREAMING_SNAKE_CASE__ : Dict =text_inputs[0][0] SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model(**UpperCamelCase__ , **UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ={ '''candidate_labels''': candidate_labels, '''logits''': outputs.logits_per_audio, } return model_outputs def __magic_name__ ( self : Dict , __lowercase : List[str] ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =model_outputs.pop('''candidate_labels''' ) SCREAMING_SNAKE_CASE__ : Any =model_outputs['''logits'''][0] if self.framework == "pt": SCREAMING_SNAKE_CASE__ : Union[str, Any] =logits.softmax(dim=0 ) SCREAMING_SNAKE_CASE__ : Dict =probs.tolist() else: raise ValueError('''`tf` framework not supported.''' ) SCREAMING_SNAKE_CASE__ : List[Any] =[ {'''score''': score, '''label''': candidate_label} for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda x : -x[0] ) ] return result
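# A minimal usage sketch for the zero-shot audio classification pipeline above.
# The checkpoint name and audio file are illustrative assumptions, not part of
# this module; any CLAP-style checkpoint with a matching processor should work.
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# The input may be a URL, a local path, or a 1-D numpy array, per preprocess() above.
predictions = classifier(
    "dog_bark.wav",
    candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
    hypothesis_template="This is a sound of {}.",
)
# -> a list of {"score": ..., "label": ...} dicts sorted by descending score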
711
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_neox""" def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : Dict =hidden_act SCREAMING_SNAKE_CASE__ : str =rotary_pct SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout SCREAMING_SNAKE_CASE__ : str =classifier_dropout SCREAMING_SNAKE_CASE__ : Any =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : Any =use_cache SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
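# A small sketch exercising the validation above, assuming the obfuscated class
# is the GPTNeoXConfig it corresponds to in transformers (values illustrative).
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(
    hidden_size=6144,
    num_attention_heads=64,  # 6144 % 64 == 0, so the divisibility check passes
    rope_scaling={"type": "linear", "factor": 2.0},  # accepted by _rope_scaling_validation
)
# rope_scaling={"type": "linear", "factor": 0.5} would raise: the factor must be a float > 1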
665
0
'''simple docstring'''
import qiskit


def quantum_entanglement(qubits: int = 2):
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(F'''Total counts for various states are: {quantum_entanglement(3)}''')
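# For reference: the circuit above prepares an n-qubit GHZ state, so over 1000
# shots the measured counts split (up to sampling noise) between the all-zeros
# and all-ones bitstrings only.
counts = quantum_entanglement(3)
assert set(counts) <= {"000", "111"}  # e.g. {'000': ~500, '111': ~500}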
712
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase ): snake_case_ = FlaxAutoencoderKL @property def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : int =4 SCREAMING_SNAKE_CASE__ : Optional[Any] =3 SCREAMING_SNAKE_CASE__ : Optional[int] =(32, 32) SCREAMING_SNAKE_CASE__ : Union[str, Any] =jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ : Any =jax.random.uniform(_UpperCAmelCase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def __magic_name__ ( self : Optional[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Dict ={ '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.dummy_input return init_dict, inputs_dict
713
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
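# A quick usage sketch of the routine above on a strictly diagonally dominant
# 3x3 system (values illustrative); each Jacobi sweep moves init_val toward
# the true solution of coefficient @ x = constant.
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))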
665
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMAEForPreTraining', 'ViTMAELayer', 'ViTMAEModel', 'ViTMAEPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TFViTMAEForPreTraining', 'TFViTMAEModel', 'TFViTMAEPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
714
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' with open(UpperCamelCase__ ) as metadata_file: SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module'''] # Load the entity vocab file SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer''' with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0] SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name] SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self." 
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias'''] SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key] else: SCREAMING_SNAKE_CASE__ : Any =state_dict[key] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' ) SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9) SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" 
{expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.''' SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist() SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE__ : Dict =[ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]'''] SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Optional[int] ={} for entry in data: SCREAMING_SNAKE_CASE__ : Tuple =entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE__ : str =entity_id break SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}" SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id return new_mapping if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
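# Illustrative invocation of the conversion entry point above; all file and
# directory names are placeholders, and the script's real filename is not
# shown in this snippet:
#
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path mluke_base/pytorch_model.bin \
#       --metadata_path mluke_base/metadata.json \
#       --entity_vocab_path mluke_base/entity_vocab.jsonl \
#       --pytorch_dump_folder_path converted_mluke_base \
#       --model_size base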
665
0
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    '''simple docstring'''
    filtered_lines = []
    for line in lines:
        line = re.sub(R'''#.*''', '''''', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '''\n'''.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''')
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
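# A tiny sketch of the comment-stripping hash above: after comments and blank
# lines are removed, the two inputs reduce to the same filtered source, so
# they map to the same cache key.
assert _hash_python_lines(["x = 1  # set x", "", "y = 2"]) == _hash_python_lines(["x = 1  ", "y = 2"])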
715
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
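# Usage sketch for the DP above: it checks whether string `a` can be turned
# into `b` by capitalizing some of its lowercase letters and deleting the rest.
assert abbr("daBcd", "ABC") is True   # capitalize a and c, drop both d's
assert abbr("dBcd", "ABC") is False   # no 'a' available to produce 'A'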
665
0
'''simple docstring''' import mpmath # for roots of unity import numpy as np class __SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __lowercase : int=None , __lowercase : List[Any]=None ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : Any =list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : Any =len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : Optional[int] =len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : List[Any] =int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : Dict =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : int =self.__multiply() def __magic_name__ ( self : List[Any] , __lowercase : Optional[int] ) -> int: SCREAMING_SNAKE_CASE__ : int =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(__lowercase ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : int =self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Union[str, Any] =[[] for i in range(__lowercase )] SCREAMING_SNAKE_CASE__ : Tuple =self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : int =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowercase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : List[str] =1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__lowercase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Dict =new_dft SCREAMING_SNAKE_CASE__ : Union[str, Any] =next_ncol // 2 return dft[0] def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Any =self.__dft('''A''' ) SCREAMING_SNAKE_CASE__ : List[str] =self.__dft('''B''' ) SCREAMING_SNAKE_CASE__ : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Dict =2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : Tuple =[[] for i in range(__lowercase )] SCREAMING_SNAKE_CASE__ : int =self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Tuple =1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : List[str] =new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : str =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] ='''A = ''' + ''' + '''.join( F"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] ='''B = ''' + ''' + '''.join( 
F"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : int ='''A*B = ''' + ''' + '''.join( F"{coef}*x^{i}" for coef, i in enumerate(self.product ) ) return F"{a}\n{b}\n{c}" # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
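# A short usage sketch, assuming the obfuscated class above is the original
# `FFT` polynomial multiplier it was derived from (coefficients are listed
# lowest order first):
product = FFT([1, 2], [3, 4])  # (1 + 2x) * (3 + 4x)
print(product)  # the A*B line should correspond to 3 + 10x + 8x^2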
716
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length SCREAMING_SNAKE_CASE__ : Dict =is_training SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids SCREAMING_SNAKE_CASE__ : List[Any] =use_labels SCREAMING_SNAKE_CASE__ : int =vocab_size SCREAMING_SNAKE_CASE__ : str =hidden_size SCREAMING_SNAKE_CASE__ : Any =embedding_size SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers SCREAMING_SNAKE_CASE__ : str =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range SCREAMING_SNAKE_CASE__ : str =num_labels SCREAMING_SNAKE_CASE__ : List[str] =num_choices SCREAMING_SNAKE_CASE__ : List[str] =scope def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : int =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Optional[int] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : List[str] ) -> Any: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , 
__lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =self.num_choices SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[Any] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__ ( self : str ) -> Any: 
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True # test_resize_embeddings = False snake_case_ = False def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str ) -> Dict: self.config_tester.run_common_tests() def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Any: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase ) def __magic_name__ ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase ) def __magic_name__ ( self : Dict ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase ) def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase ) def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' return torch.tensor( UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase ) model.to(__lowercase ) model.half() SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0] SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj] SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj] SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase ) self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
665
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a_ = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
'''simple docstring''' import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split() SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args execute_subprocess_async(__lowercase , env=os.environ.copy() )
665
0
'''simple docstring'''
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    '''simple docstring'''
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, '''r''') as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, '''w''') as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail=None):
    '''simple docstring'''
    if fail is not None:
        with open(fail, '''r''') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, '''r''') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(''';''')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--correct_filename', help='filename of tests with expected result')
    parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
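# Expected input format for --correct_filename above: one semicolon-separated
# record per line, `path;class;test;correct_line` (paths and names below are
# illustrative):
#
#   tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference_no_head;expected_slice = torch.tensor([[0.1, 0.2, 0.3]])
#
# When --fail_filename is given, only the tests listed there (one
# `path::class::test` per line) have their expected slices overwritten.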
718
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = ShapEImgaImgPipeline snake_case_ = ["""image"""] snake_case_ = ["""image"""] snake_case_ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[Any] ) -> List[Any]: return 32 @property def __magic_name__ ( self : List[str] ) -> Optional[int]: return 32 @property def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: return self.time_input_dim * 4 @property def __magic_name__ ( self : Dict ) -> Union[str, Any]: return 8 @property def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor( crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __magic_name__ ( self : List[str] ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : str ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase ) return model def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.dummy_prior SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler( beta_schedule='''exp''' , 
num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , ) SCREAMING_SNAKE_CASE__ : Any ={ '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any: SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : Any ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : int ='''cpu''' SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] =np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __magic_name__ ( self : List[Any] ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu''' SCREAMING_SNAKE_CASE__ : Optional[Any] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , ) def __magic_name__ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =1 SCREAMING_SNAKE_CASE__ : List[str] =2 SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase ) for key in inputs.keys(): if key in self.batch_params: SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]] SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Optional[Any] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) SCREAMING_SNAKE_CASE__ : Dict =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) SCREAMING_SNAKE_CASE__ : List[Any] 
=ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =pipe( __lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
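# Hedged usage sketch mirroring the slow test above. In the released diffusers
# API the pipeline class is spelled ShapEImg2ImgPipeline; generation settings
# are taken directly from the test.
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
frames = pipe(image, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np").images[0]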
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
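# Hedged illustration of what the _LazyModule wiring above buys: attributes
# resolve to their submodules only on first access, so importing the package
# stays cheap until a heavy class is actually used.
from transformers import BloomConfig  # resolved lazily through _LazyModule

config = BloomConfig(hidden_size=64, n_layer=2, n_head=4)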
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
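# Hedged usage sketch for the config above: attribute_map lets the canonical
# transformer names resolve to the GPT-style ones.
from transformers import GPTBigCodeConfig

cfg = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
print(cfg.hidden_size, cfg.num_hidden_layers)  # 256 4 -- routed via attribute_map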
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
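# Hedged usage sketch for the custom pipeline above, following the Hugging Face
# custom-pipeline pattern it implements (the checkpoint id is illustrative; any
# sequence-classification model/tokenizer pair should work).
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "sgugger/finetuned-bert-mrpc"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

pipe = PairClassificationPipeline(model=model, tokenizer=tokenizer)
print(pipe("I like you", second_text="I love you"))
# -> {"label": ..., "score": ..., "logits": [...]}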
class MaxFenwickTree:
    """Binary indexed (Fenwick) tree for range-maximum queries with point updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # the node covers exactly one position
                self.tree[index] = value
            else:
                # recompute the maximum over the node's span [current_left_border, index]
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
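# Hedged usage sketch for the max-Fenwick tree above: point updates followed by
# range-maximum queries (the right bound is exclusive, as noted in query()).
tree = MaxFenwickTree(5)
tree.update(0, 3)
tree.update(2, 7)
print(tree.query(0, 3))  # 7 -- maximum over indices 0..2
print(tree.query(3, 5))  # 0 -- untouched positions keep their initial value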
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
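# Hedged toy check of the cosine kernel above: orthonormal rows give the
# identity pattern (row-wise cosine similarity).
a = jnp.array([[1.0, 0.0], [0.0, 1.0]])
b = jnp.array([[1.0, 0.0]])
print(jax_cosine_distance(a, b))  # [[1.], [0.]]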
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = ["""vqvae"""] def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int: super().__init__() self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase ) def __magic_name__ ( self : List[str] ) -> int: return 50 if isinstance(self.scheduler , __lowercase ) else 10_00 @torch.no_grad() def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps() self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowercase , device=self.device , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =noise SCREAMING_SNAKE_CASE__ : List[str] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample( generator=__lowercase )[0] SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images if start_step > 0: SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowercase ): SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample'''] else: SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample'''] if isinstance(self.scheduler , __lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample'''] else: SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start] if mask_end > 0: SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' ) SCREAMING_SNAKE_CASE__ : Any =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) ) @torch.no_grad() def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowercase ) self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t] SCREAMING_SNAKE_CASE__ : int =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor: SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) ) return sin((1 - alpha) * theta ) * 
xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
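# Hedged usage sketch for the pipeline above (AudioDiffusionPipeline in the
# released diffusers API; the checkpoint id is illustrative).
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(generator=torch.Generator().manual_seed(42))
spectrogram_image = output.images[0]
audio = output.audios[0]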
import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =old_name if "patch_embed" in old_name: SCREAMING_SNAKE_CASE__ : Dict =old_name.split('''.''' ) if layer == "0": SCREAMING_SNAKE_CASE__ : Optional[int] =old_name.replace('''0''', '''convolution1''' ) elif layer == "1": SCREAMING_SNAKE_CASE__ : Dict =old_name.replace('''1''', '''batchnorm_before''' ) elif layer == "3": SCREAMING_SNAKE_CASE__ : Any =old_name.replace('''3''', '''convolution2''' ) else: SCREAMING_SNAKE_CASE__ : List[Any] =old_name.replace('''4''', '''batchnorm_after''' ) if "network" in old_name and re.search(R'''\d\.\d''', UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =R'''\b\d{2}\b''' if bool(re.search(UpperCamelCase__, UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE__ : List[str] =re.search(R'''\d\.\d\d.''', UpperCamelCase__ ).group() else: SCREAMING_SNAKE_CASE__ : Any =re.search(R'''\d\.\d.''', UpperCamelCase__ ).group() if int(match[0] ) < 6: SCREAMING_SNAKE_CASE__ : Tuple =old_name.replace(UpperCamelCase__, '''''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] ) SCREAMING_SNAKE_CASE__ : int ='''intermediate_stages.''' + trimmed_name else: SCREAMING_SNAKE_CASE__ : Optional[int] =old_name.replace(UpperCamelCase__, '''''' ) if int(match[2] ) < num_meta4D_last_stage: SCREAMING_SNAKE_CASE__ : Optional[int] =trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =str(int(match[2] ) - num_meta4D_last_stage ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index ) if "norm1" in old_name: SCREAMING_SNAKE_CASE__ : List[Any] =trimmed_name.replace('''norm1''', '''layernorm1''' ) elif "norm2" in old_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] =trimmed_name.replace('''norm2''', '''layernorm2''' ) elif "fc1" in old_name: SCREAMING_SNAKE_CASE__ : Optional[int] =trimmed_name.replace('''fc1''', '''linear_in''' ) elif "fc2" in old_name: SCREAMING_SNAKE_CASE__ : List[Any] =trimmed_name.replace('''fc2''', '''linear_out''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''last_stage.''' + trimmed_name elif "network" in old_name and re.search(R'''.\d.''', UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Any =old_name.replace('''network''', '''intermediate_stages''' ) if "fc" in new_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] =new_name.replace('''fc''', '''convolution''' ) elif ("norm1" in new_name) and ("layernorm1" not in new_name): SCREAMING_SNAKE_CASE__ : str =new_name.replace('''norm1''', '''batchnorm_before''' ) elif ("norm2" in new_name) and ("layernorm2" not in new_name): SCREAMING_SNAKE_CASE__ : Optional[int] =new_name.replace('''norm2''', '''batchnorm_after''' ) if "proj" in new_name: SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''proj''', '''projection''' ) if "dist_head" in new_name: SCREAMING_SNAKE_CASE__ : Union[str, Any] =new_name.replace('''dist_head''', '''distillation_classifier''' ) elif "head" in new_name: 
SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''head''', '''classifier''' ) elif "patch_embed" in new_name: SCREAMING_SNAKE_CASE__ : Any ='''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": SCREAMING_SNAKE_CASE__ : List[Any] =new_name.replace('''norm''', '''layernorm''' ) SCREAMING_SNAKE_CASE__ : List[str] ='''efficientformer.''' + new_name else: SCREAMING_SNAKE_CASE__ : Any ='''efficientformer.encoder.''' + new_name return new_name def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict ): '''simple docstring''' for key in checkpoint.copy().keys(): SCREAMING_SNAKE_CASE__ : str =checkpoint.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =val return checkpoint def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE__ : Dict =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw ) return image def _a( UpperCamelCase__ : Path, UpperCamelCase__ : Path, UpperCamelCase__ : Path, UpperCamelCase__ : bool ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =torch.load(UpperCamelCase__, map_location='''cpu''' )['''model'''] SCREAMING_SNAKE_CASE__ : str =EfficientFormerConfig.from_json_file(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =EfficientFormerForImageClassificationWithTeacher(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple ='''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] ) SCREAMING_SNAKE_CASE__ : Optional[int] =config.depths[-1] - config.num_metaad_blocks + 1 SCREAMING_SNAKE_CASE__ : Optional[Any] =convert_torch_checkpoint(UpperCamelCase__, UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] ={ '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image SCREAMING_SNAKE_CASE__ : List[Any] =prepare_img() SCREAMING_SNAKE_CASE__ : Any =2_5_6 SCREAMING_SNAKE_CASE__ : str =2_2_4 SCREAMING_SNAKE_CASE__ : int =EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =processor(images=UpperCamelCase__, return_tensors='''pt''' ).pixel_values # original processing pipeline SCREAMING_SNAKE_CASE__ : Union[str, Any] =Compose( [ Resize(UpperCamelCase__, interpolation=pillow_resamplings['''bicubic'''] ), CenterCrop(UpperCamelCase__ ), ToTensor(), Normalize(UpperCamelCase__, UpperCamelCase__ ), ] ) SCREAMING_SNAKE_CASE__ : Dict =image_transforms(UpperCamelCase__ ).unsqueeze(0 ) assert torch.allclose(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str =model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str =outputs.logits SCREAMING_SNAKE_CASE__ : Tuple =(1, 1_0_0_0) if "l1" in model_name: SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Tensor( [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] ) assert torch.allclose(logits[0, :1_0], UpperCamelCase__, atol=1e-3 ) assert logits.shape == expected_shape elif "l3" in model_name: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Tensor( [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] ) assert torch.allclose(logits[0, :1_0], UpperCamelCase__, atol=1e-3 ) assert logits.shape == 
expected_shape elif "l7" in model_name: SCREAMING_SNAKE_CASE__ : int =torch.Tensor( [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] ) assert logits.shape == expected_shape else: raise ValueError( f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" ) # Save Checkpoints Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" ) processor.save_pretrained(UpperCamelCase__ ) print(f"Processor successfuly saved at {pytorch_dump_path}" ) if push_to_hub: print('''Pushing model to the hub...''' ) model.push_to_hub( repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add model''', use_temp_dir=UpperCamelCase__, ) processor.push_to_hub( repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add image processor''', use_temp_dir=UpperCamelCase__, ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--pytorch_model_path', default=None, type=str, required=True, help='Path to EfficientFormer pytorch checkpoint.', ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The json file for EfficientFormer model config.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) parser.set_defaults(push_to_hub=True) a_ = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
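# Hedged invocation sketch for the converter above (script name and paths are
# illustrative; the original EfficientFormer release ships .pth checkpoints and
# per-model config JSON files):
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path snapshots/efficientformer_l1_300d.pth \
#       --config_file configs/efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub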
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the numbers below max_number that are products of exactly two primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
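# Hedged sanity check on a small bound: the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten numbers in total.
assert solution(30) == 10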
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
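# Hedged usage sketch: initialising and applying the resnet block above with
# NHWC inputs (shapes illustrative; dropout is off in deterministic mode).
block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
hidden = jnp.ones((1, 8, 8, 32))
temb = jnp.ones((1, 128))
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out = block.apply(params, hidden, temb)  # (1, 8, 8, 32)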
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = SpeechTaTokenizer snake_case_ = False snake_case_ = True def __magic_name__ ( self : int ) -> Any: super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test''' SCREAMING_SNAKE_CASE__ : int ='''this is a test''' return input_text, output_text def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase ) return text, ids def __magic_name__ ( self : Dict ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>''' SCREAMING_SNAKE_CASE__ : Optional[int] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__lowercase ) , 81 ) def __magic_name__ ( self : Dict ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Any =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) 
self.assertEqual(__lowercase , all_size + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : int =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) self.assertEqual(__lowercase , all_size_a + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __magic_name__ ( self : Optional[Any] ) -> Any: pass def __magic_name__ ( self : List[str] ) -> List[Any]: pass def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase ) # fmt: off self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def __magic_name__ ( self : List[str] ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. 
SCREAMING_SNAKE_CASE__ : List[Any] =[ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off SCREAMING_SNAKE_CASE__ : str ={ '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
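# Hedged usage sketch for the tokenizer exercised above (SpeechT5Tokenizer in
# the released library; the model id is taken from the integration test).
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
print(tok.tokenize("this is a test"))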
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=True)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
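# Hedged invocation sketch for the converter above (script name and checkpoint
# path are illustrative):
#   python convert_m2m100_original_checkpoint_to_pytorch.py \
#       checkpoints/m2m100_418M/model.pt converted/m2m100-418M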
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =num_channels SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_frames SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_type SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Any =scope SCREAMING_SNAKE_CASE__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : int =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels return config def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) # verify the logits shape SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester( self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int: SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : List[Any] ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ) -> Optional[int]: pass def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[str]: if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : str =False SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : List[Any] =True SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self : Tuple ) -> List[Any]: def check_hidden_states_output(__lowercase : Tuple , 
__lowercase : Dict , __lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Tuple =prepare_video() SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
665
0
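A minimal sketch of the weight-tying pattern used by make_linear_from_emb in the fairseq-to-MaMaaa converter above, assuming toy vocabulary and hidden sizes; only the helper's logic comes from the script, everything else (names, shapes, inputs) is illustrative.

import torch
from torch import nn

vocab_size, d_model = 128, 16  # toy sizes, assumed for illustration
emb = nn.Embedding(vocab_size, d_model)

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # The constructor's weight is immediately replaced by the tied tensor,
    # so the head shares storage with the embedding matrix.
    lin_layer.weight.data = emb.weight.data
    return lin_layer

lm_head = make_linear_from_emb(emb)
tokens = torch.tensor([3, 7])
logits = lm_head(emb(tokens))  # (2, vocab_size): scores over the vocabulary
assert logits.shape == (2, vocab_size)

Because the Linear reuses the embedding's storage, updating one updates the other, which keeps input and output vocabularies consistent after conversion.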
'''simple docstring''' def _a( a : str, b : str ): '''simple docstring''' n =len(a ) m =len(b ) dp =[[False for _ in range(m + 1 )] for _ in range(n + 1 )] dp[0][0] =True for i in range(n ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: dp[i + 1][j + 1] =True if a[i].islower(): dp[i + 1][j] =True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
703
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } a_ = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } a_ = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = BertTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]: super().__init__( 
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__ : Any =do_lower_case SCREAMING_SNAKE_CASE__ : Any =strip_accents SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
665
0
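The DP matcher above decides whether string a can be turned into string b by capitalizing some of its lowercase letters and deleting the remaining lowercase ones. A hedged usage sketch, assuming the function above is in scope under its anonymized name _a; the inputs are illustrative.

print(_a("daBcd", "ABC"))  # True: capitalize 'a' and 'c', delete both 'd's
print(_a("dBcd", "ABC"))   # False: there is no 'a' to capitalize into 'A'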
'''simple docstring''' import cv2 as cva import numpy as np class HarrisCorner : def __init__( self , k : float , window_size : int ) -> None: if k in (0.04, 0.06): self.k =k self.window_size =window_size else: raise ValueError('''invalid k value''' ) def __str__( self ) -> str: return str(self.k ) def detect( self , img_path : str ) -> tuple[cva.Mat, list[list[int]]]: img =cva.imread(img_path , 0 ) h, w =img.shape corner_list : list[list[int]] =[] color_img =img.copy() color_img =cva.cvtColor(color_img , cva.COLOR_GRAY2RGB ) dy, dx =np.gradient(img ) ixx =dx**2 iyy =dy**2 ixy =dx * dy k =0.04 offset =self.window_size // 2 for y in range(offset , h - offset ): for x in range(offset , w - offset ): wxx =ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wyy =iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() wxy =ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() det =(wxx * wyy) - (wxy**2) trace =wxx + wyy r =det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": edge_detect = HarrisCorner(0.04, 3) color_img , corner_list = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
704
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a_ = False a_ = False def _a( UpperCamelCase__ : Namespace ): '''simple docstring''' return TrainCommand(UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @staticmethod def __magic_name__ ( __lowercase : ArgumentParser ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' ) SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =args.output SCREAMING_SNAKE_CASE__ : str =args.column_label SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text SCREAMING_SNAKE_CASE__ : Tuple =args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": SCREAMING_SNAKE_CASE__ : List[str] 
=TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from {args.train_data}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon def __magic_name__ ( self : Any ) -> str: if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : Optional[int] ) -> Tuple: raise NotImplementedError def __magic_name__ ( self : Dict ) -> List[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
665
0
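The detector above computes the Harris response r = det(M) - k * trace(M)^2 over a sliding window of squared image gradients. The numpy-only sketch below reproduces that response loop on a synthetic image so no OpenCV install or image file is needed; the image contents, the window size, and the relative threshold at the end are assumptions for illustration.

import numpy as np

img = np.zeros((20, 20), dtype=float)  # synthetic frame: one bright square
img[5:15, 5:15] = 255.0

dy, dx = np.gradient(img)  # gradients along rows (y) and columns (x)
ixx, iyy, ixy = dx**2, dy**2, dx * dy

k, offset = 0.04, 1  # window_size=3 -> offset=1, matching the defaults above
responses = np.zeros_like(img)
for y in range(offset, img.shape[0] - offset):
    for x in range(offset, img.shape[1] - offset):
        wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
        responses[y, x] = wxx * wyy - wxy**2 - k * (wxx + wyy) ** 2  # Harris r

ys, xs = np.where(responses > 0.5 * responses.max())  # relative threshold, assumed
print(sorted(zip(xs.tolist(), ys.tolist()))[:4])  # candidates cluster at the square's corners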
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Tuple=1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =[] for _ in range(UpperCamelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Union[str, Any]=1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =[] for step in range(UpperCamelCase__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : str =os.path.join(UpperCamelCase__, '''schedule.bin''' ) torch.save(scheduler.state_dict(), UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Dict =torch.load(UpperCamelCase__ ) scheduler.load_state_dict(UpperCamelCase__ ) return lrs @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : List[str] ) -> int: self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for a, b in zip(__lowercase , __lowercase ): self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase ) def __magic_name__ ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =torch.tensor([0.4, 0.2, -0.5] ) SCREAMING_SNAKE_CASE__ : Dict =nn.MSELoss() # No warmup, constant schedule, no gradient clipping SCREAMING_SNAKE_CASE__ : Optional[int] =AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =criterion(__lowercase , __lowercase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.tensor([0.4, 0.2, -0.5] ) SCREAMING_SNAKE_CASE__ : List[str] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping SCREAMING_SNAKE_CASE__ : Dict =Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowercase , weight_decay=0.0 , relative_step=__lowercase , scale_parameter=__lowercase , warmup_init=__lowercase , ) for _ in range(10_00 ): SCREAMING_SNAKE_CASE__ : List[Any] =criterion(__lowercase , __lowercase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = nn.Linear(50 , 50 ) if is_torch_available() else None snake_case_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None snake_case_ = 10 def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int]=None ) -> Union[str, Any]: self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for a, b in zip(__lowercase , __lowercase ): self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase , msg=__lowercase ) def __magic_name__ ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE__ : int ={'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) SCREAMING_SNAKE_CASE__ : Any ={ get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): SCREAMING_SNAKE_CASE__ : Optional[int] =data SCREAMING_SNAKE_CASE__ : List[Any] =scheduler_func(self.optimizer , **__lowercase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) SCREAMING_SNAKE_CASE__ : List[str] =unwrap_schedule(__lowercase , self.num_steps ) self.assertListAlmostEqual( __lowercase , __lowercase , tol=1e-2 , msg=F"failed for {scheduler_func} in normal scheduler" , ) SCREAMING_SNAKE_CASE__ : str =scheduler_func(self.optimizer , **__lowercase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__lowercase ) # wrap to test picklability of the schedule SCREAMING_SNAKE_CASE__ : Union[str, Any] =unwrap_and_save_reload_schedule(__lowercase , self.num_steps ) self.assertListEqual(__lowercase , __lowercase , msg=F"failed for {scheduler_func} in save and reload" ) class __SCREAMING_SNAKE_CASE : def __init__( self : str , __lowercase : Any ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : str =fn def __call__( self : Union[str, Any] , *__lowercase : Any , **__lowercase : Optional[Any] ) -> int: return self.fn(*__lowercase , **__lowercase ) @classmethod def __magic_name__ ( self : Tuple , __lowercase : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(map(self , scheduler.lr_lambdas ) )
705
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
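The scheduler tests above checkpoint a learning-rate schedule halfway through training and verify the restored schedule continues unchanged. Below is a minimal torch-only version of that round-trip, assuming a simple linear-decay LambdaLR in place of the transformers schedules; the model, sizes, and decay rule are illustrative.

import os
import tempfile

import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR

model = nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = LambdaLR(optimizer, lr_lambda=lambda step: max(0.0, 1 - step / 10))

lrs = []
for step in range(10):
    lrs.append(scheduler.get_last_lr()[0])
    scheduler.step()
    if step == 4:  # checkpoint mid-run, then restore, as the test above does
        with tempfile.TemporaryDirectory() as tmpdirname:
            path = os.path.join(tmpdirname, "schedule.bin")
            torch.save(scheduler.state_dict(), path)
            scheduler.load_state_dict(torch.load(path))

print([round(lr, 1) for lr in lrs])  # [10.0, 9.0, ..., 1.0]: decay unaffected by the round-trip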
'''simple docstring''' from __future__ import annotations ELECTRON_CHARGE = 1.6021E-19 # units = C def _a( conductivity : float, electron_conc : float, mobility : float, ): '''simple docstring''' if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
706
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a_ = TypeVar('T') class __SCREAMING_SNAKE_CASE ( Generic[T] ): snake_case_ = 42 # Cache store of keys snake_case_ = 42 # References of the keys in cache snake_case_ = 10 # Maximum capacity of cache def __init__( self : Dict , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : Any =deque() SCREAMING_SNAKE_CASE__ : str =set() if not n: SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =n def __magic_name__ ( self : List[str] , __lowercase : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop() self.key_reference.remove(__lowercase ) else: self.dq_store.remove(__lowercase ) self.dq_store.appendleft(__lowercase ) self.key_reference.add(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> None: for k in self.dq_store: print(__lowercase ) def __repr__( self : List[Any] ) -> str: return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}" if __name__ == "__main__": import doctest doctest.testmod() a_ = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
665
0
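The carrier-concentration helper above solves sigma = q * n * mu for whichever of conductivity, electron concentration, or mobility is passed as 0. A worked example, assuming the function above is in scope under its anonymized name _a; the input values are illustrative.

# mobility = sigma / (n * q) = 25.0 / (1e18 * 1.6021e-19) ≈ 156.04
print(_a(conductivity=25.0, electron_conc=1e18, mobility=0))
# -> ('mobility', 156.04...)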
'''simple docstring''' import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _a( UpperCamelCase__ : str ): '''simple docstring''' UpperCamelCase__ = re.sub('''<n>''', '''''', UpperCamelCase__ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
707
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a_ = list[list[float | int]] def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col] SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0] SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : Tuple =0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, UpperCamelCase__ ): for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col] for cola in range(UpperCamelCase__, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ ) ] def _a( UpperCamelCase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Dict =y_val SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ ) def interpolated_func(UpperCamelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCamelCase__ ) ) return interpolated_func def _a( UpperCamelCase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Any =1 while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ): x_val += 1 ret += poly(UpperCamelCase__ ) 
return ret if __name__ == "__main__": print(F'''{solution() = }''')
665
0
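The Project Euler 101 solution above fits successively larger polynomials through the sequence's first terms (via Gaussian elimination) and sums each fit's first incorrect term (FIT). The sketch below shows the same idea with numpy's solver standing in for the hand-rolled elimination; u(n) = n**3 is an assumed toy generating function, for which the FIT sum is 1 + 15 + 58 = 74.

import numpy as np

def interpolate(xs, ys):
    # Degree len(xs)-1 polynomial through the points: solve the Vandermonde system.
    coeffs = np.linalg.solve(np.vander(xs), ys)
    return lambda x: float(np.polyval(coeffs, x))

u = lambda n: n**3
fit_sum = 0
for k in range(1, 4):  # OP(k, n) interpolates the first k terms
    xs = np.arange(1, k + 1, dtype=float)
    poly = interpolate(xs, [u(x) for x in xs])
    n = 1
    while round(poly(n)) == u(n):
        n += 1
    fit_sum += round(poly(n))  # first value where OP(k, n) diverges from u

print(fit_sum)  # 74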
'''simple docstring''' from collections import defaultdict def check_anagrams( first_str : str, second_str : str ): '''simple docstring''' first_str =first_str.lower().strip() second_str =second_str.lower().strip() # Remove whitespace first_str =first_str.replace(''' ''', '''''' ) second_str =second_str.replace(''' ''', '''''' ) # Strings of different lengths are not anagrams if len(first_str ) != len(second_str ): return False # Default values for count should be 0 count : defaultdict[str, int] =defaultdict(int ) # For each character in input strings, # increment count in the corresponding for i in range(len(first_str ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() input_a = input('Enter the first string ').strip() input_b = input('Enter the second string ').strip() status = check_anagrams(input_a, input_b) print(F'''{input_a} and {input_b} are {'' if status else 'not '}anagrams.''')
708
'''simple docstring''' def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point] if current_item == item: return point else: if point < left: SCREAMING_SNAKE_CASE__ : Union[str, Any] =left SCREAMING_SNAKE_CASE__ : Optional[Any] =point elif point > right: SCREAMING_SNAKE_CASE__ : Optional[int] =right SCREAMING_SNAKE_CASE__ : Tuple =point else: if item < current_item: SCREAMING_SNAKE_CASE__ : str =point - 1 else: SCREAMING_SNAKE_CASE__ : Tuple =point + 1 return None def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) elif point > right: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 ) else: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ ) def _a( UpperCamelCase__ : Dict ): '''simple docstring''' if collection != sorted(UpperCamelCase__ ): raise ValueError('''Collection must be ascending sorted''' ) return True if __name__ == "__main__": import sys a_ = 0 if debug == 1: a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') a_ = 6_7 a_ = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print('Not found')
665
0
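A runnable reconstruction of the iterative interpolation search above, restoring the standard variant its control flow suggests, including the bracket-shifting branches taken when the interpolated probe lands outside [left, right]; the sample data mirrors the original's __main__ block, and the restored local names are assumptions.

def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):  # out of range check
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:  # probe undershot the bracket: shift the bracket left
            left, right = point, left
        elif point > right:  # probe overshot the bracket: shift it right
            left, right = right, point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None

collection = [10, 30, 40, 45, 50, 66, 77, 93]
print(interpolation_search(collection, 45))  # 3
print(interpolation_search(collection, 67))  # None: 67 is absent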
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration a_ = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] a_ = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] a_ = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) a_ = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) a_ = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any] ): '''simple docstring''' for tf_name, hf_name in patterns: SCREAMING_SNAKE_CASE__ : str =k.replace(UpperCamelCase__, UpperCamelCase__ ) return k def _a( UpperCamelCase__ : dict, UpperCamelCase__ : dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =BigBirdPegasusConfig(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int =BigBirdPegasusForConditionalGeneration(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple =torch_model.state_dict() SCREAMING_SNAKE_CASE__ : Dict ={} # separating decoder weights SCREAMING_SNAKE_CASE__ : int ={k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} SCREAMING_SNAKE_CASE__ : List[Any] ={k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): SCREAMING_SNAKE_CASE__ : Any =[k.endswith(UpperCamelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCamelCase__ ): continue SCREAMING_SNAKE_CASE__ : Optional[int] =DECODER_PATTERNS SCREAMING_SNAKE_CASE__ : Union[str, Any] =rename_state_dict_key(UpperCamelCase__, UpperCamelCase__ ) if new_k not in state_dict: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =v.T SCREAMING_SNAKE_CASE__ : int =torch.from_numpy(UpperCamelCase__ ) assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): SCREAMING_SNAKE_CASE__ : Dict =[k.endswith(UpperCamelCase__ ) for ending in KEYS_TO_IGNORE] if any(UpperCamelCase__ ): continue SCREAMING_SNAKE_CASE__ : List[Any] =REMAINING_PATTERNS SCREAMING_SNAKE_CASE__ : Any =rename_state_dict_key(UpperCamelCase__, UpperCamelCase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"could not find new key {new_k} in state dict. 
(converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): SCREAMING_SNAKE_CASE__ : Optional[Any] =v.T SCREAMING_SNAKE_CASE__ : List[str] =torch.from_numpy(UpperCamelCase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" SCREAMING_SNAKE_CASE__ : Dict =mapping['''model.embed_positions.weight'''] SCREAMING_SNAKE_CASE__ : Optional[int] =mapping.pop('''model.embed_positions.weight''' ) SCREAMING_SNAKE_CASE__ : int =torch_model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str =[ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def _a( UpperCamelCase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =tf.train.list_variables(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Dict ={} SCREAMING_SNAKE_CASE__ : int =['''global_step'''] for name, shape in tqdm(UpperCamelCase__, desc='''converting tf checkpoint to dict''' ): SCREAMING_SNAKE_CASE__ : List[str] =any(pat in name for pat in ignore_name ) if skip_key: continue SCREAMING_SNAKE_CASE__ : Any =tf.train.load_variable(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Dict =array return tf_weights def _a( UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =get_tf_weights_as_numpy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =convert_bigbird_pegasus(UpperCamelCase__, UpperCamelCase__ ) torch_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') a_ = parser.parse_args() a_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
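The BigBirdPegasus converter above renames TensorFlow checkpoint keys by applying an ordered list of (tf_name, hf_name) substring substitutions. A small, hedged illustration of that rename_state_dict_key pattern; the three patterns are drawn from the INIT_COMMON list above, while the sample key is a toy value, not a real checkpoint entry.

def rename_state_dict_key(k: str, patterns: list[tuple[str, str]]) -> str:
    # Apply each substitution in order; later patterns see earlier rewrites.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
print(rename_state_dict_key("pegasus/encoder/layer_0/kernel", patterns))
# -> 'pegasus.encoder.layers.0.weight'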
709
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @require_tf def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, 
{'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @slow @require_torch def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : str =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
665
0
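A minimal, runnable sketch of the pattern-based key renaming that the BigBirdPegasus conversion script above relies on; the patterns and the example key below are a small illustrative subset, not the script's full mapping tables.

from typing import List, Tuple

def rename_key(k: str, patterns: List[Tuple[str, str]]) -> str:
    # Apply each (tf_name, hf_name) substitution in order; later patterns see
    # the result of earlier ones, so the order of the table matters.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
print(rename_key("pegasus/decoder/layer_0/kernel", patterns))  # model.decoder.layers.0.weight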
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable a_ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DPTFeatureExtractor'] a_ = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
710
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __magic_name__ ( self : Optional[int] ) -> str: import torch SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : str =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 
51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __magic_name__ ( self : Any ) -> List[str]: import torch SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
665
0
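A minimal sketch of the deferred-import idea behind the `_LazyModule` used in the DPT `__init__` above, written with PEP 562 module-level `__getattr__`. This is an assumption-level illustration of the mechanism, not the library's actual implementation; the submodule and class names are placeholders, and the code is meant to live in a package `__init__.py`.

import importlib

_import_structure = {"configuration_dpt": ["DPTConfig"]}
# Invert the table so each public attribute knows its owning submodule.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # PEP 562: called only when `name` is not found normally, so the heavy
    # submodule is imported on first access instead of at package import time.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")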
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def _a( UpperCamelCase__ : Any, UpperCamelCase__ : Dict=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =[] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE__ : Tuple =[(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[int]=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE__ : List[Any] ='''''' else: SCREAMING_SNAKE_CASE__ : Optional[int] ='''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ : List[Any] =state_dict.pop(f"blocks.{i}.attn.qkv.weight" ) SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict.pop(f"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ : int =in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE__ : str =in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE__ : str =in_proj_weight[ 
config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE__ : str =in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE__ : List[str] =in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE__ : Dict =in_proj_bias[-config.hidden_size :] def _a( UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(UpperCamelCase__, UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] =val def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''http://images.cocodataset.org/val2017/000000039769.jpg''' SCREAMING_SNAKE_CASE__ : List[Any] =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def _a( UpperCamelCase__ : int, UpperCamelCase__ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =ViTConfig() SCREAMING_SNAKE_CASE__ : int =False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(vit_name[-1_2:-1_0] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(vit_name[-9:-6] ) else: SCREAMING_SNAKE_CASE__ : Any =1_0_0_0 SCREAMING_SNAKE_CASE__ : List[Any] ='''huggingface/label-files''' SCREAMING_SNAKE_CASE__ : int ='''imagenet-1k-id2label.json''' SCREAMING_SNAKE_CASE__ : List[Any] =json.load(open(hf_hub_download(UpperCamelCase__, UpperCamelCase__, repo_type='''dataset''' ), '''r''' ) ) SCREAMING_SNAKE_CASE__ : List[str] ={int(UpperCamelCase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : int =idalabel SCREAMING_SNAKE_CASE__ : str ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE__ : List[Any] =int(vit_name[-6:-4] ) SCREAMING_SNAKE_CASE__ : int =int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): SCREAMING_SNAKE_CASE__ : Optional[int] =1_9_2 SCREAMING_SNAKE_CASE__ : Dict =7_6_8 SCREAMING_SNAKE_CASE__ : Optional[int] =1_2 SCREAMING_SNAKE_CASE__ : Tuple =3 elif vit_name[9:].startswith('''small''' ): SCREAMING_SNAKE_CASE__ : Any =3_8_4 SCREAMING_SNAKE_CASE__ : Any =1_5_3_6 SCREAMING_SNAKE_CASE__ : Any =1_2 SCREAMING_SNAKE_CASE__ : Union[str, Any] =6 else: pass else: if vit_name[4:].startswith('''small''' ): SCREAMING_SNAKE_CASE__ : str =7_6_8 SCREAMING_SNAKE_CASE__ : Optional[int] =2_3_0_4 SCREAMING_SNAKE_CASE__ : List[str] =8 SCREAMING_SNAKE_CASE__ : List[Any] =8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): SCREAMING_SNAKE_CASE__ : str =1_0_2_4 SCREAMING_SNAKE_CASE__ : Optional[Any] =4_0_9_6 SCREAMING_SNAKE_CASE__ : Optional[Any] =2_4 SCREAMING_SNAKE_CASE__ : Dict =1_6 elif vit_name[4:].startswith('''huge''' ): SCREAMING_SNAKE_CASE__ : List[Any] =1_2_8_0 SCREAMING_SNAKE_CASE__ : int =5_1_2_0 SCREAMING_SNAKE_CASE__ : Tuple =3_2 SCREAMING_SNAKE_CASE__ : int =1_6 # load original model from timm SCREAMING_SNAKE_CASE__ : str =timm.create_model(UpperCamelCase__, pretrained=UpperCamelCase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys SCREAMING_SNAKE_CASE__ : Optional[int] =timm_model.state_dict() if base_model: remove_classification_head_(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ 
: List[str] =create_rename_keys(UpperCamelCase__, UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # load HuggingFace model if vit_name[-5:] == "in21k": SCREAMING_SNAKE_CASE__ : List[str] =ViTModel(UpperCamelCase__ ).eval() else: SCREAMING_SNAKE_CASE__ : Dict =ViTForImageClassification(UpperCamelCase__ ).eval() model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: SCREAMING_SNAKE_CASE__ : Optional[Any] =DeiTImageProcessor(size=config.image_size ) else: SCREAMING_SNAKE_CASE__ : Any =ViTImageProcessor(size=config.image_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processor(images=prepare_img(), return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : Dict =encoding['''pixel_values'''] SCREAMING_SNAKE_CASE__ : Any =model(UpperCamelCase__ ) if base_model: SCREAMING_SNAKE_CASE__ : str =timm_model.forward_features(UpperCamelCase__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(UpperCamelCase__, outputs.pooler_output, atol=1e-3 ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =timm_model(UpperCamelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCamelCase__, outputs.logits, atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) a_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
711
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_neox""" def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : Dict =hidden_act SCREAMING_SNAKE_CASE__ : str =rotary_pct SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout SCREAMING_SNAKE_CASE__ : str =classifier_dropout SCREAMING_SNAKE_CASE__ : Any =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : Any =use_cache SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
665
0
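A small runnable sketch of what the `read_in_q_k_v` helper in the ViT conversion script above does with timm's fused attention projection: the `(3 * hidden, hidden)` qkv matrix is sliced into query, key, and value blocks, in that order. The hidden size here is an arbitrary toy value.

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]

# Each block is a full square projection of its own.
assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)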
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
712
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
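A minimal sketch of the optional-dependency guard that the Shap-E and DeBERTa `__init__` modules above apply: probe for the backend, and if it is missing, bind the public name to a dummy that raises a helpful error only when instantiated. The dummy class here is an illustrative stand-in, not the library's real dummy-object machinery.

class _DummyShapEPipeline:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "ShapEPipeline requires `torch` and `transformers`; install them to use it."
        )

try:
    import torch  # noqa: F401  # probe the backend only
except ImportError:
    # Bind the public name to the dummy: importing the package still succeeds,
    # and the error surfaces only when someone actually constructs the pipeline.
    ShapEPipeline = _DummyShapEPipeline
else:
    # The real package would import the implementation here, e.g.
    # from .pipeline_shap_e import ShapEPipeline
    pass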
'''simple docstring''' def _a( UpperCamelCase__ : list, UpperCamelCase__ : int, UpperCamelCase__ : int = 0, UpperCamelCase__ : int = 0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =right or len(UpperCamelCase__ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(UpperCamelCase__, UpperCamelCase__, left + 1, right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
713
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape if rowsa != colsa: SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}" raise ValueError(UpperCamelCase__ ) if colsa != 1: SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}" raise ValueError(UpperCamelCase__ ) if rowsa != rowsa: SCREAMING_SNAKE_CASE__ : str =( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' f"received {rowsa}x{colsa} and {rowsa}x{colsa}" ) raise ValueError(UpperCamelCase__ ) if len(UpperCamelCase__ ) != rowsa: SCREAMING_SNAKE_CASE__ : Union[str, Any] =( '''Number of initial values must be equal to number of rows in coefficient ''' f"matrix but received {len(UpperCamelCase__ )} and {rowsa}" ) raise ValueError(UpperCamelCase__ ) if iterations <= 0: raise ValueError('''Iterations must be at least 1''' ) SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate( (coefficient_matrix, constant_matrix), axis=1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape strictly_diagonally_dominant(UpperCamelCase__ ) # Iterates the whole matrix for given number of times for _ in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : List[str] =[] for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =0 for col in range(UpperCamelCase__ ): if col == row: SCREAMING_SNAKE_CASE__ : int =table[row][col] elif col == cols - 1: SCREAMING_SNAKE_CASE__ : Any =table[row][col] else: temp += (-1) * table[row][col] * init_val[col] SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom new_val.append(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val return [float(UpperCamelCase__ ) for i in new_val] def _a( UpperCamelCase__ : NDArray[floataa] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape SCREAMING_SNAKE_CASE__ : Any =True for i in range(0, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : int =0 for j in range(0, cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
665
0
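A worked, runnable example of the Jacobi update implemented above, on a strictly diagonally dominant 2x2 system; the matrix and right-hand side are arbitrary illustrative values.

import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])   # strictly diagonally dominant
b = np.array([1.0, 2.0])
x = np.zeros(2)                          # initial guess
d = np.diag(A)
for _ in range(25):
    # x_i <- (b_i - sum_{j != i} A[i, j] * x_j) / A[i, i], using only the old x
    x = (b - (A @ x - d * x)) / d
print(x)                      # ~ [0.0909, 0.6364]
print(np.linalg.solve(A, b))  # exact solution [1/11, 7/11] for comparison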
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _a( UpperCamelCase__ : ndarray ): '''simple docstring''' return np.dot(UpperCamelCase__, UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , *, __lowercase : float = np.inf , __lowercase : str = "linear" , __lowercase : float = 0.0 , ) -> None: SCREAMING_SNAKE_CASE__ : List[str] =regularization SCREAMING_SNAKE_CASE__ : Optional[Any] =gamma if kernel == "linear": SCREAMING_SNAKE_CASE__ : List[Any] =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('''rbf kernel requires gamma''' ) if not isinstance(self.gamma , (float, int) ): raise ValueError('''gamma must be float or int''' ) if not self.gamma > 0: raise ValueError('''gamma must be > 0''' ) SCREAMING_SNAKE_CASE__ : List[str] =self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: SCREAMING_SNAKE_CASE__ : List[str] =F"Unknown kernel: {kernel}" raise ValueError(__lowercase ) def __magic_name__ ( self : List[Any] , __lowercase : ndarray , __lowercase : ndarray ) -> float: return np.dot(__lowercase , __lowercase ) def __magic_name__ ( self : Tuple , __lowercase : ndarray , __lowercase : ndarray ) -> float: return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def __magic_name__ ( self : Optional[int] , __lowercase : list[ndarray] , __lowercase : ndarray ) -> None: SCREAMING_SNAKE_CASE__ : Tuple =observations SCREAMING_SNAKE_CASE__ : Union[str, Any] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations (SCREAMING_SNAKE_CASE__ ) : Union[str, Any] =np.shape(__lowercase ) def to_minimize(__lowercase : ndarray ) -> float: SCREAMING_SNAKE_CASE__ : Optional[Any] =0 (SCREAMING_SNAKE_CASE__ ) : str =np.shape(__lowercase ) for i in range(__lowercase ): for j in range(__lowercase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =LinearConstraint(__lowercase , 0 , 0 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Bounds(0 , self.regularization ) SCREAMING_SNAKE_CASE__ : Dict =minimize( __lowercase , np.ones(__lowercase ) , bounds=__lowercase , constraints=[ly_contraint] ).x SCREAMING_SNAKE_CASE__ : List[Any] =l_star # calculating mean offset of separation plane to points SCREAMING_SNAKE_CASE__ : Optional[int] =0 for i in range(__lowercase ): for j in range(__lowercase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) SCREAMING_SNAKE_CASE__ : List[Any] =s / n def __magic_name__ ( self : Union[str, Any] , __lowercase : ndarray ) -> int: SCREAMING_SNAKE_CASE__ : Any =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , __lowercase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
714
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' with open(UpperCamelCase__ ) as metadata_file: SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module'''] # Load the entity vocab file SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer''' with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0] SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name] SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self." 
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias'''] SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key] else: SCREAMING_SNAKE_CASE__ : Any =state_dict[key] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' ) SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9) SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" 
{expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.''' SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist() SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE__ : Dict =[ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]'''] SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Optional[int] ={} for entry in data: SCREAMING_SNAKE_CASE__ : Tuple =entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE__ : str =entity_id break SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}" SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id return new_mapping if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
665
0
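A small sketch of the embedding-growth step in the MLuke conversion above: the rows for the new `<ent>`/`<ent2>` special tokens are initialized from existing token embeddings ('@' and '#') and concatenated onto the word-embedding matrix. The shapes and the two indices below are toy values, not the real vocabulary ids.

import torch

word_emb = torch.randn(10, 4)              # (vocab_size, hidden_size), toy sizes
ent_init_index, enta_init_index = 3, 5     # stand-ins for the ids of '@' and '#'
ent_emb = word_emb[ent_init_index].unsqueeze(0)
enta_emb = word_emb[enta_init_index].unsqueeze(0)
word_emb = torch.cat([word_emb, ent_emb, enta_emb])  # vocab grows by 2 rows
assert word_emb.shape == (12, 4)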
'''simple docstring''' from typing import TYPE_CHECKING from ..utils import _LazyModule a_ = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
715
'''simple docstring''' def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] =True for i in range(UpperCamelCase__ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: SCREAMING_SNAKE_CASE__ : Optional[int] =True if a[i].islower(): SCREAMING_SNAKE_CASE__ : List[Any] =True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
665
0
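A readable reference version of the capitalization DP above (often called the "abbreviation" problem): dp[i][j] is True when the first i characters of `a` can be turned into the first j characters of `b` by upper-casing some lowercase letters of `a` and deleting the remaining lowercase ones. The test strings are illustrative.

def is_abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True   # use a[i], upper-cased, as b[j]
                if a[i].islower():
                    dp[i + 1][j] = True       # delete the lowercase a[i]
    return dp[n][m]

print(is_abbreviation("daBcd", "ABC"))  # True
print(is_abbreviation("dBcd", "ABC"))   # False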
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : str, UpperCamelCase__ : str, UpperCamelCase__ : Path, UpperCamelCase__ : str = None, UpperCamelCase__ : str = None, UpperCamelCase__ : str = None, ): '''simple docstring''' if config_name_or_path is None: SCREAMING_SNAKE_CASE__ : List[Any] ='''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base''' if generator_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE__ : int =generator_name_or_path if question_encoder_tokenizer_name_or_path is None: SCREAMING_SNAKE_CASE__ : List[str] =question_encoder_name_or_path SCREAMING_SNAKE_CASE__ : int =RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration # Save model. SCREAMING_SNAKE_CASE__ : Dict =RagConfig.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =AutoConfig.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =AutoConfig.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] =gen_config SCREAMING_SNAKE_CASE__ : int =question_encoder_config SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class.from_pretrained_question_encoder_generator( UpperCamelCase__, UpperCamelCase__, config=UpperCamelCase__ ) rag_model.save_pretrained(UpperCamelCase__ ) # Sanity check. model_class.from_pretrained(UpperCamelCase__ ) # Save tokenizers. SCREAMING_SNAKE_CASE__ : List[str] =AutoTokenizer.from_pretrained(UpperCamelCase__ ) gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' ) SCREAMING_SNAKE_CASE__ : int =AutoTokenizer.from_pretrained(UpperCamelCase__ ) question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) a_ = parser.parse_args() a_ = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
716
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length SCREAMING_SNAKE_CASE__ : Dict =is_training SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids SCREAMING_SNAKE_CASE__ : List[Any] =use_labels SCREAMING_SNAKE_CASE__ : int =vocab_size SCREAMING_SNAKE_CASE__ : str =hidden_size SCREAMING_SNAKE_CASE__ : Any =embedding_size SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers SCREAMING_SNAKE_CASE__ : str =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range SCREAMING_SNAKE_CASE__ : str =num_labels SCREAMING_SNAKE_CASE__ : List[str] =num_choices SCREAMING_SNAKE_CASE__ : List[str] =scope def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : int =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Optional[int] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : List[str] ) -> Any: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , 
__lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =self.num_choices SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[Any] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__ ( self : str ) -> Any: 
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True # test_resize_embeddings = False snake_case_ = False def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str ) -> Dict: self.config_tester.run_common_tests() def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Any: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase ) def __magic_name__ ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase ) def __magic_name__ ( self : Dict ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase ) def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase ) def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' return torch.tensor( UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase ) model.to(__lowercase ) model.half() SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0] SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj] SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj] SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase ) self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
665
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def _a( UpperCamelCase__ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] =4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int =4 SCREAMING_SNAKE_CASE__ : Any =4_8 SCREAMING_SNAKE_CASE__ : List[Any] ='''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] =[6, 6, 6, 6] SCREAMING_SNAKE_CASE__ : Tuple =6_0 SCREAMING_SNAKE_CASE__ : List[Any] =[6, 6, 6, 6] SCREAMING_SNAKE_CASE__ : Tuple ='''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[Any] =4 SCREAMING_SNAKE_CASE__ : Any ='''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Union[str, Any] =1 SCREAMING_SNAKE_CASE__ : int =1 SCREAMING_SNAKE_CASE__ : Any =1_2_6 SCREAMING_SNAKE_CASE__ : Dict =7 SCREAMING_SNAKE_CASE__ : Optional[Any] =2_5_5.0 SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''''' return config def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Dict ): '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: SCREAMING_SNAKE_CASE__ : str =name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ : List[Any] =name.replace('''patch_embed.norm''', '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: SCREAMING_SNAKE_CASE__ : List[str] =name.replace('''layers''', '''encoder.stages''' ) if "residual_group.blocks" in name: SCREAMING_SNAKE_CASE__ : List[Any] =name.replace('''residual_group.blocks''', '''layers''' ) if "attn.proj" in name: SCREAMING_SNAKE_CASE__ : List[Any] =name.replace('''attn.proj''', '''attention.output.dense''' ) if "attn" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] =name.replace('''attn''', '''attention.self''' ) if "norm1" in name: SCREAMING_SNAKE_CASE__ : Any =name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: SCREAMING_SNAKE_CASE__ : str =name.replace('''norm2''', '''layernorm_after''' ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ : Optional[Any] =name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ : Optional[int] =name.replace('''mlp.fc2''', '''output.dense''' ) if "q_bias" in name: SCREAMING_SNAKE_CASE__ : List[Any] =name.replace('''q_bias''', '''query.bias''' ) if "k_bias" in name: SCREAMING_SNAKE_CASE__ : List[str] =name.replace('''k_bias''', '''key.bias''' ) if "v_bias" in name: SCREAMING_SNAKE_CASE__ : List[str] =name.replace('''v_bias''', '''value.bias''' ) if "cpb_mlp" in name: SCREAMING_SNAKE_CASE__ : int =name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ : Tuple =name.replace('''patch_embed.proj''', '''patch_embed.projection''' ) if name == "norm.weight": SCREAMING_SNAKE_CASE__ : Optional[Any] ='''layernorm.weight''' if name == "norm.bias": SCREAMING_SNAKE_CASE__ : Optional[int] ='''layernorm.bias''' if "conv_first" in name: SCREAMING_SNAKE_CASE__ : str =name.replace('''conv_first''', '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or 
"conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: SCREAMING_SNAKE_CASE__ : Any =name.replace('''conv_last''', '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: SCREAMING_SNAKE_CASE__ : str =name.replace('''conv_before_upsample.0''', '''conv_before_upsample''' ) if "upsample.0" in name: SCREAMING_SNAKE_CASE__ : str =name.replace('''upsample.0''', '''upsample.convolution_0''' ) if "upsample.2" in name: SCREAMING_SNAKE_CASE__ : Union[str, Any] =name.replace('''upsample.2''', '''upsample.convolution_1''' ) SCREAMING_SNAKE_CASE__ : Tuple ='''upsample.''' + name elif config.upsampler == "pixelshuffledirect": SCREAMING_SNAKE_CASE__ : int =name.replace('''upsample.0.weight''', '''upsample.conv.weight''' ) SCREAMING_SNAKE_CASE__ : str =name.replace('''upsample.0.bias''', '''upsample.conv.bias''' ) else: pass else: SCREAMING_SNAKE_CASE__ : int ='''swin2sr.''' + name return name def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Tuple ): '''simple docstring''' for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ : List[Any] =orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ : Union[str, Any] =key.split('''.''' ) SCREAMING_SNAKE_CASE__ : List[Any] =int(key_split[1] ) SCREAMING_SNAKE_CASE__ : Dict =int(key_split[4] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =config.embed_dim if "weight" in key: SCREAMING_SNAKE_CASE__ : List[Any] =val[:dim, :] SCREAMING_SNAKE_CASE__ : Union[str, Any] =val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ : Tuple =val[-dim:, :] else: SCREAMING_SNAKE_CASE__ : str =val[:dim] SCREAMING_SNAKE_CASE__ : List[Any] =val[dim : dim * 2] SCREAMING_SNAKE_CASE__ : Dict =val[-dim:] pass else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =val return orig_state_dict def _a( UpperCamelCase__ : str, UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =get_config(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Dict =SwinaSRForImageSuperResolution(UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =torch.hub.load_state_dict_from_url(UpperCamelCase__, map_location='''cpu''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =convert_state_dict(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(UpperCamelCase__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict" ) # verify values SCREAMING_SNAKE_CASE__ : Any ='''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open(requests.get(UpperCamelCase__, stream=UpperCamelCase__ ).raw ).convert('''RGB''' ) SCREAMING_SNAKE_CASE__ : List[Any] =SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values SCREAMING_SNAKE_CASE__ : List[Any] =1_2_6 if '''Jpeg''' in checkpoint_url else 2_5_6 SCREAMING_SNAKE_CASE__ : str =Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) SCREAMING_SNAKE_CASE__ : int =transforms(UpperCamelCase__ ).unsqueeze(0 ) if 
config.num_channels == 1: SCREAMING_SNAKE_CASE__ : Optional[Any] =pixel_values[:, 0, :, :].unsqueeze(1 ) SCREAMING_SNAKE_CASE__ : List[str] =model(UpperCamelCase__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: SCREAMING_SNAKE_CASE__ : int =torch.Size([1, 3, 5_1_2, 5_1_2] ) SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Any =torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) SCREAMING_SNAKE_CASE__ : Dict =torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here SCREAMING_SNAKE_CASE__ : Tuple =torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: SCREAMING_SNAKE_CASE__ : List[Any] =torch.Size([1, 3, 5_1_2, 5_1_2] ) SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], UpperCamelCase__, atol=1e-3 ) print('''Looks ok!''' ) SCREAMING_SNAKE_CASE__ : List[str] ={ '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } SCREAMING_SNAKE_CASE__ : int =url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: model.push_to_hub(f"caidas/{model_name}" ) processor.push_to_hub(f"caidas/{model_name}" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth', type=str, help='URL of the original Swin2SR checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' 
) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.') a_ = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
717
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
665
0
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * solution = vector by Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0

            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]

            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the lowest-degree polynomial through the points (1, y_list[0]), (2, y_list[1]), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The degree-10 generating polynomial used by the puzzle."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the best-fit polynomials of increasing degree."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
718
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = ShapEImgaImgPipeline snake_case_ = ["""image"""] snake_case_ = ["""image"""] snake_case_ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[Any] ) -> List[Any]: return 32 @property def __magic_name__ ( self : List[str] ) -> Optional[int]: return 32 @property def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: return self.time_input_dim * 4 @property def __magic_name__ ( self : Dict ) -> Union[str, Any]: return 8 @property def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor( crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __magic_name__ ( self : List[str] ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : str ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase ) return model def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.dummy_prior SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler( beta_schedule='''exp''' , 
num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , ) SCREAMING_SNAKE_CASE__ : Any ={ '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any: SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : Any ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : int ='''cpu''' SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] =np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __magic_name__ ( self : List[Any] ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu''' SCREAMING_SNAKE_CASE__ : Optional[Any] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , ) def __magic_name__ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =1 SCREAMING_SNAKE_CASE__ : List[str] =2 SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase ) for key in inputs.keys(): if key in self.batch_params: SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]] SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Optional[Any] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) SCREAMING_SNAKE_CASE__ : Dict =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) SCREAMING_SNAKE_CASE__ : List[Any] 
=ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =pipe( __lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
719
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
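The `attribute_map` is what lets callers use the generic Hugging Face attribute names (`hidden_size`, `num_hidden_layers`, ...) even though the config stores GPT-2-style fields. A minimal usage sketch, added here for illustration and not part of the source file:

# Illustrative only: the aliased attributes resolve through attribute_map.
config = GPTBigCodeConfig()
print(config.n_embd)             # 768
print(config.hidden_size)        # 768, alias of n_embd
print(config.num_hidden_layers)  # 12, alias of n_layer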
665
0
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
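Since `main()` only prints values, a short cross-check against the standard library makes the equivalence of the two variants explicit. This check is an addition, not part of the source file:

# Quick property check: both variants should agree with math.gcd.
import math

for x, y in [(3, 5), (54, 24), (48, 180), (7, 0)]:
    assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)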
720
class MaxFenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
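A short usage sketch for the tree above, added for illustration only (the class name is reconstructed, and `query` treats `right` as exclusive):

# Illustrative usage: point updates, then range-maximum queries over [left, right).
ft = MaxFenwickTree(8)
ft.update(2, 10)
ft.update(5, 7)
print(ft.query(0, 8))  # 10, maximum over the whole array
print(ft.query(3, 6))  # 7, maximum over indices 3..5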
665
0
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Adjacency-list graph for 0-1 BFS: edge weights are restricted to 0 or 1."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
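A small worked example of the 0-1 BFS above; this is an illustration, not part of the source file, and the method names follow the reconstruction:

# Illustrative only: 0 -(0)-> 1 -(1)-> 2 -(0)-> 3, with a direct 0 -(1)-> 2 shortcut.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
g.add_edge(2, 3, 0)
print(g.get_shortest_path(0, 3))  # 1, since only one unit-weight edge is needed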
721
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = ["""vqvae"""] def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int: super().__init__() self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase ) def __magic_name__ ( self : List[str] ) -> int: return 50 if isinstance(self.scheduler , __lowercase ) else 10_00 @torch.no_grad() def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps() self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowercase , device=self.device , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =noise SCREAMING_SNAKE_CASE__ : List[str] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample( generator=__lowercase )[0] SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images if start_step > 0: SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowercase ): SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample'''] else: SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample'''] if isinstance(self.scheduler , __lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample'''] else: SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start] if mask_end > 0: SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' ) SCREAMING_SNAKE_CASE__ : Any =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) ) @torch.no_grad() def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowercase ) self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t] SCREAMING_SNAKE_CASE__ : int =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor: SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) ) return sin((1 - alpha) * theta ) * 
xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
665
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =num_channels SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_frames SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_type SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Any =scope SCREAMING_SNAKE_CASE__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : int =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels return config def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) # verify the logits shape SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester( self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int: SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : List[Any] ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ) -> Optional[int]: pass def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] 
=[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[str]: if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : str =False SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : List[Any] =True SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self : Tuple ) -> List[Any]: def check_hidden_states_output(__lowercase : Tuple , __lowercase : Dict , __lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) model.to(__lowercase )
model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Tuple =prepare_video() SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
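# Illustrative sketch (not part of the test file above): every shape assertion in
# that file derives from one piece of arithmetic in the tester, namely
# seq_length = num_frames * (image_size // patch_size) ** 2 + 1. With the tester
# defaults (image_size=10, patch_size=2, num_frames=2) the numbers work out as:
image_size, patch_size, num_frames = 10, 2, 2
num_patches_per_frame = (image_size // patch_size) ** 2  # 25 spatial patches per frame
seq_length = num_frames * num_patches_per_frame + 1      # 51 tokens, including the CLS token
# Divided space-time attention operates per frame, so each attention map covers the
# per-frame patches plus CLS, which is the seq_len // num_frames + 1 used above:
assert seq_length == 51 and seq_length // num_frames + 1 == num_patches_per_frame + 1 == 26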
700
'''simple docstring''' from math import isqrt def _a( UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =[True] * max_number for i in range(2, isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Any =False return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]] def _a( UpperCamelCase__ : int = 1_0**8 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 ) SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'''{solution() = }''')
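# Readable sketch of the routine above with the intended identifiers restored (the
# automatic renaming above left `max_number` unrenamed in places and collapsed the
# sieve step, which should be `i`, into the limit argument): a sieve of
# Eratosthenes followed by a two-pointer count of products p * q below the limit.
from math import isqrt

def primes_below(limit: int) -> list[int]:
    is_prime = [True] * limit
    for i in range(2, isqrt(limit - 1) + 1):
        if is_prime[i]:
            for j in range(i * i, limit, i):  # strike out every multiple of i
                is_prime[j] = False
    return [i for i in range(2, limit) if is_prime[i]]

def count_semiprimes(max_number: int = 10**8) -> int:
    primes = primes_below(max_number // 2)
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= max_number:
            right -= 1  # shrink until primes[left] * primes[right] < max_number
        count += right - left + 1  # primes[left] pairs with each prime in primes[left..right]
        left += 1
    return count

assert count_semiprimes(10) == 3  # the semiprimes below 10 are 4, 6 and 9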
665
0
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets a_ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' a_ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' a_ = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def __magic_name__ ( self : Optional[int] ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , ) def __magic_name__ ( self : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =0.0 for i, j in zip(__lowercase , __lowercase ): n_correct += 1.0 if math_equivalence.is_equiv(__lowercase , __lowercase ) else 0.0 SCREAMING_SNAKE_CASE__ : str =n_correct / len(__lowercase ) return { "accuracy": accuracy, }
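# The compute step above boils down to this sketch (math_equivalence comes from the
# hendrycks/math repository and canonicalizes LaTeX before comparing, so "1/2" and
# "\frac{1}{2}" count as equal; the obfuscated len() call above presumably divides
# by the number of predictions, as in the upstream metric):
def exact_match_accuracy(references, predictions, is_equiv):
    n_correct = sum(1.0 if is_equiv(ref, pred) else 0.0 for ref, pred in zip(references, predictions))
    return n_correct / len(predictions)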
701
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = SpeechTaTokenizer snake_case_ = False snake_case_ = True def __magic_name__ ( self : int ) -> Any: super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test''' SCREAMING_SNAKE_CASE__ : int ='''this is a test''' return input_text, output_text def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase ) return text, ids def __magic_name__ ( self : Dict ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>''' SCREAMING_SNAKE_CASE__ : Optional[int] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__lowercase ) , 81 ) def __magic_name__ ( self : Dict ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Any =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) 
self.assertEqual(__lowercase , all_size + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : int =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) self.assertEqual(__lowercase , all_size_a + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __magic_name__ ( self : Optional[Any] ) -> Any: pass def __magic_name__ ( self : List[str] ) -> List[Any]: pass def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase ) # fmt: off self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def __magic_name__ ( self : List[str] ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. 
SCREAMING_SNAKE_CASE__ : List[Any] =[ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off SCREAMING_SNAKE_CASE__ : str ={ '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
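# The assertions above encode a character-level vocabulary: SPIECE_UNDERLINE ("▁")
# marks word boundaries and every other token is a single character, so the
# expected tokens for "This is a test" can be reconstructed directly (sketch):
SPIECE_UNDERLINE = "▁"
expected = [SPIECE_UNDERLINE] + list("This") + [SPIECE_UNDERLINE] + list("is") + [SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE] + list("test")
assert expected == [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"]
# Out-of-vocabulary characters, such as the digits in "92000", map to <unk> (id 3 above).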
665
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """wavlm""" def __init__( self : List[Any] , __lowercase : Any=32 , __lowercase : List[str]=7_68 , __lowercase : Any=12 , __lowercase : Any=12 , __lowercase : Dict=30_72 , __lowercase : Union[str, Any]="gelu" , __lowercase : List[str]=0.1 , __lowercase : List[str]=0.1 , __lowercase : Dict=0.1 , __lowercase : Union[str, Any]=0.0 , __lowercase : str=0.1 , __lowercase : List[Any]=0.1 , __lowercase : Dict=0.02 , __lowercase : Any=1e-5 , __lowercase : Union[str, Any]="group" , __lowercase : List[str]="gelu" , __lowercase : Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowercase : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __lowercase : Dict=(10, 3, 3, 3, 3, 2, 2) , __lowercase : str=False , __lowercase : Optional[int]=1_28 , __lowercase : Dict=16 , __lowercase : int=3_20 , __lowercase : Optional[Any]=8_00 , __lowercase : Optional[int]=False , __lowercase : int=True , __lowercase : str=0.05 , __lowercase : Any=10 , __lowercase : List[Any]=2 , __lowercase : Optional[Any]=0.0 , __lowercase : Any=10 , __lowercase : Dict=3_20 , __lowercase : Optional[Any]=2 , __lowercase : int=0.1 , __lowercase : Union[str, Any]=1_00 , __lowercase : Optional[Any]=2_56 , __lowercase : Dict=2_56 , __lowercase : Dict=0.1 , __lowercase : Any="mean" , __lowercase : List[Any]=False , __lowercase : List[Any]=False , __lowercase : Any=2_56 , __lowercase : List[Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowercase : Optional[int]=(5, 3, 3, 1, 1) , __lowercase : Optional[int]=(1, 2, 3, 1, 1) , __lowercase : str=5_12 , __lowercase : Any=80 , __lowercase : List[Any]=0 , __lowercase : Optional[Any]=1 , __lowercase : int=2 , __lowercase : Tuple=False , __lowercase : int=3 , __lowercase : Tuple=2 , __lowercase : List[Any]=3 , __lowercase : Tuple=None , **__lowercase : Any , ) -> Any: super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase ) SCREAMING_SNAKE_CASE__ : int =hidden_size SCREAMING_SNAKE_CASE__ : str =feat_extract_norm SCREAMING_SNAKE_CASE__ : str =feat_extract_activation SCREAMING_SNAKE_CASE__ : int =list(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =list(__lowercase ) SCREAMING_SNAKE_CASE__ : int =list(__lowercase ) SCREAMING_SNAKE_CASE__ : int =conv_bias SCREAMING_SNAKE_CASE__ : Tuple =num_buckets SCREAMING_SNAKE_CASE__ : List[str] =max_bucket_distance SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_conv_pos_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_conv_pos_embedding_groups SCREAMING_SNAKE_CASE__ : Optional[int] =len(self.conv_dim ) SCREAMING_SNAKE_CASE__ : Optional[int] =num_hidden_layers SCREAMING_SNAKE_CASE__ : Dict =intermediate_size SCREAMING_SNAKE_CASE__ : int =hidden_act SCREAMING_SNAKE_CASE__ : List[Any] =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =hidden_dropout SCREAMING_SNAKE_CASE__ : Dict =attention_dropout SCREAMING_SNAKE_CASE__ : List[str] =activation_dropout SCREAMING_SNAKE_CASE__ : List[Any] =feat_proj_dropout SCREAMING_SNAKE_CASE__ : str =final_dropout SCREAMING_SNAKE_CASE__ : List[Any] =layerdrop SCREAMING_SNAKE_CASE__ : List[str] =layer_norm_eps SCREAMING_SNAKE_CASE__ : int =initializer_range SCREAMING_SNAKE_CASE__ 
: List[str] =num_ctc_classes SCREAMING_SNAKE_CASE__ : Tuple =vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] =do_stable_layer_norm SCREAMING_SNAKE_CASE__ : str =use_weighted_layer_sum SCREAMING_SNAKE_CASE__ : Tuple =classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE__ : int =apply_spec_augment SCREAMING_SNAKE_CASE__ : Tuple =mask_time_prob SCREAMING_SNAKE_CASE__ : Any =mask_time_length SCREAMING_SNAKE_CASE__ : Dict =mask_time_min_masks SCREAMING_SNAKE_CASE__ : Optional[Any] =mask_feature_prob SCREAMING_SNAKE_CASE__ : List[Any] =mask_feature_length # parameters for pretraining with codevector quantized representations SCREAMING_SNAKE_CASE__ : int =num_codevectors_per_group SCREAMING_SNAKE_CASE__ : List[Any] =num_codevector_groups SCREAMING_SNAKE_CASE__ : Union[str, Any] =contrastive_logits_temperature SCREAMING_SNAKE_CASE__ : Optional[Any] =num_negatives SCREAMING_SNAKE_CASE__ : List[Any] =codevector_dim SCREAMING_SNAKE_CASE__ : Optional[Any] =proj_codevector_dim SCREAMING_SNAKE_CASE__ : int =diversity_loss_weight # ctc loss SCREAMING_SNAKE_CASE__ : List[Any] =ctc_loss_reduction SCREAMING_SNAKE_CASE__ : Any =ctc_zero_infinity # adapter SCREAMING_SNAKE_CASE__ : Optional[int] =add_adapter SCREAMING_SNAKE_CASE__ : List[str] =adapter_kernel_size SCREAMING_SNAKE_CASE__ : int =adapter_stride SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_adapter_layers SCREAMING_SNAKE_CASE__ : Optional[int] =output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE__ : Any =classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE__ : Dict =list(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =list(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =list(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =xvector_output_dim @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
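# Quick check of the property at the end of the config above (in the upstream
# WavLMConfig it is called inputs_to_logits_ratio): it multiplies the convolutional
# strides, so with the default conv_stride=(5, 2, 2, 2, 2, 2, 2) the feature
# extractor downsamples the waveform by a factor of 320; at 16 kHz input that is
# one hidden frame per 320 samples, i.e. 20 ms (illustrative sketch):
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320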
702
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =num_channels SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_frames SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_type SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Any =scope SCREAMING_SNAKE_CASE__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : int =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels return config def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) # verify the logits shape SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester( self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int: SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : List[Any] ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ) -> Optional[int]: pass def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[str]: if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : str =False SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : List[Any] =True SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self : Tuple ) -> List[Any]: def check_hidden_states_output(__lowercase : Tuple , 
__lowercase : Dict , __lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Tuple =prepare_video() SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
665
0
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING a_ = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __init__( self : Any , *__lowercase : List[Any] , **__lowercase : List[Any] ) -> Union[str, Any]: super().__init__(*__lowercase , **__lowercase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def __magic_name__ ( self : Union[str, Any] , __lowercase : str=None , __lowercase : str=None , __lowercase : Any=None ) -> Any: SCREAMING_SNAKE_CASE__ : List[Any] ={} SCREAMING_SNAKE_CASE__ : Tuple ={} if prompt is not None: SCREAMING_SNAKE_CASE__ : Optional[int] =prompt if generate_kwargs is not None: SCREAMING_SNAKE_CASE__ : int =generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: SCREAMING_SNAKE_CASE__ : Optional[int] ={} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) SCREAMING_SNAKE_CASE__ : List[str] =max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Any , __lowercase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__lowercase : Optional[int] ) -> Tuple: return super().__call__(__lowercase , **__lowercase ) def __magic_name__ ( self : int , __lowercase : str , __lowercase : str=None ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str =load_image(__lowercase ) if prompt is not None: if not isinstance(__lowercase , __lowercase ): raise ValueError( F"Received an invalid text input, got - {type(__lowercase )} - but expected a single string. 
" '''Note also that one single text can be provided for conditional image to text generation.''' ) SCREAMING_SNAKE_CASE__ : Tuple =self.model.config.model_type if model_type == "git": SCREAMING_SNAKE_CASE__ : Any =self.image_processor(images=__lowercase , return_tensors=self.framework ) SCREAMING_SNAKE_CASE__ : List[str] =self.tokenizer(text=__lowercase , add_special_tokens=__lowercase ).input_ids SCREAMING_SNAKE_CASE__ : Optional[int] =[self.tokenizer.cls_token_id] + input_ids SCREAMING_SNAKE_CASE__ : str =torch.tensor(__lowercase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.image_processor(images=__lowercase , header_text=__lowercase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation SCREAMING_SNAKE_CASE__ : List[str] =self.image_processor(images=__lowercase , return_tensors=self.framework ) SCREAMING_SNAKE_CASE__ : Dict =self.tokenizer(__lowercase , return_tensors=self.framework ) model_inputs.update(__lowercase ) else: raise ValueError(F"Model type {model_type} does not support conditional text generation" ) else: SCREAMING_SNAKE_CASE__ : List[Any] =self.image_processor(images=__lowercase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: SCREAMING_SNAKE_CASE__ : List[Any] =None return model_inputs def __magic_name__ ( self : int , __lowercase : Any , __lowercase : Optional[int]=None ) -> Optional[int]: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , __lowercase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): SCREAMING_SNAKE_CASE__ : List[Any] =None if generate_kwargs is None: SCREAMING_SNAKE_CASE__ : Any ={} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. SCREAMING_SNAKE_CASE__ : int =model_inputs.pop(self.model.main_input_name ) SCREAMING_SNAKE_CASE__ : int =self.model.generate(__lowercase , **__lowercase , **__lowercase ) return model_outputs def __magic_name__ ( self : int , __lowercase : Dict ) -> str: SCREAMING_SNAKE_CASE__ : Any =[] for output_ids in model_outputs: SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''generated_text''': self.tokenizer.decode( __lowercase , skip_special_tokens=__lowercase , ) } records.append(__lowercase ) return records
703
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } a_ = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } a_ = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = BertTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]: super().__init__( 
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__ : Any =do_lower_case SCREAMING_SNAKE_CASE__ : Any =strip_accents SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
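# Schematic of what the two helpers at the end of the tokenizer above produce
# (build_inputs_with_special_tokens and create_token_type_ids_from_sequences,
# with X and Y the token ids of the first and second sequence):
#   single sequence: [CLS] X [SEP]           token_type_ids: 0 ... 0
#   sequence pair:   [CLS] X [SEP] Y [SEP]   token_type_ids: 0 ... 0 1 ... 1
# Runnable sketch (downloads the checkpoint on first use):
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(["hello"]))
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id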
665
0
'''simple docstring'''


def solution(power: int = 1_0_0_0):
    '''simple docstring'''
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
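Because Python integers are arbitrary precision, the result can be cross-checked in one line (a sanity check, not part of the original file):

assert solution(15) == sum(int(digit) for digit in str(2**15)) == 26  # 32768 -> 3+2+7+6+8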
704
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a_ = False a_ = False def _a( UpperCamelCase__ : Namespace ): '''simple docstring''' return TrainCommand(UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @staticmethod def __magic_name__ ( __lowercase : ArgumentParser ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' ) SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =args.output SCREAMING_SNAKE_CASE__ : str =args.column_label SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text SCREAMING_SNAKE_CASE__ : Tuple =args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": SCREAMING_SNAKE_CASE__ : List[str] 
=TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from {args.train_data}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon def __magic_name__ ( self : Any ) -> str: if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : Optional[int] ) -> Tuple: raise NotImplementedError def __magic_name__ ( self : Dict ) -> List[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
665
0
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets lowerCAmelCase_ = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n' lowerCAmelCase_ = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n' lowerCAmelCase_ = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}' def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : Optional[int], UpperCamelCase__ : bool, UpperCamelCase__ : Optional[Dict[int, int]] = None, UpperCamelCase__ : bool = False, ): '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): SCREAMING_SNAKE_CASE__ : Optional[int] =new_id # turn into Numpy arrays SCREAMING_SNAKE_CASE__ : int =np.array(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int =np.array(UpperCamelCase__ ) if reduce_labels: SCREAMING_SNAKE_CASE__ : str =2_5_5 SCREAMING_SNAKE_CASE__ : str =label - 1 SCREAMING_SNAKE_CASE__ : Tuple =2_5_5 SCREAMING_SNAKE_CASE__ : Union[str, Any] =label != ignore_index SCREAMING_SNAKE_CASE__ : str =np.not_equal(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =pred_label[mask] SCREAMING_SNAKE_CASE__ : Tuple =np.array(UpperCamelCase__ )[mask] SCREAMING_SNAKE_CASE__ : List[Any] =pred_label[pred_label == label] SCREAMING_SNAKE_CASE__ : List[Any] =np.histogram(UpperCamelCase__, bins=UpperCamelCase__, range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =np.histogram(UpperCamelCase__, bins=UpperCamelCase__, range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : List[str] =np.histogram(UpperCamelCase__, bins=UpperCamelCase__, range=(0, num_labels - 1) )[0] SCREAMING_SNAKE_CASE__ : str =area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _a( UpperCamelCase__ : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : int, UpperCamelCase__ : bool, UpperCamelCase__ : Optional[Dict[int, int]] = None, UpperCamelCase__ : bool = False, ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =np.zeros((num_labels,), dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : List[str] =np.zeros((num_labels,), dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : Dict =np.zeros((num_labels,), dtype=np.floataa ) SCREAMING_SNAKE_CASE__ : List[str] =np.zeros((num_labels,), dtype=np.floataa ) for result, gt_seg_map in zip(UpperCamelCase__, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =intersect_and_union( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : str, UpperCamelCase__ : List[Any], UpperCamelCase__ : bool, UpperCamelCase__ : Optional[int] = None, UpperCamelCase__ : Optional[Dict[int, int]] = None, UpperCamelCase__ : bool = False, ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =total_intersect_and_union( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # compute metrics SCREAMING_SNAKE_CASE__ : Tuple ={} SCREAMING_SNAKE_CASE__ : List[Any] =total_area_intersect.sum() / total_area_label.sum() SCREAMING_SNAKE_CASE__ : Union[str, Any] =total_area_intersect / total_area_union SCREAMING_SNAKE_CASE__ : int =total_area_intersect / total_area_label SCREAMING_SNAKE_CASE__ : Tuple 
=np.nanmean(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =np.nanmean(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int =all_acc SCREAMING_SNAKE_CASE__ : int =iou SCREAMING_SNAKE_CASE__ : Optional[Any] =acc if nan_to_num is not None: SCREAMING_SNAKE_CASE__ : Tuple ={metric: np.nan_to_num(UpperCamelCase__, nan=UpperCamelCase__ ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def __magic_name__ ( self : Dict ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def __magic_name__ ( self : int , __lowercase : List[str] , __lowercase : str , __lowercase : int , __lowercase : bool , __lowercase : Optional[int] = None , __lowercase : Optional[Dict[int, int]] = None , __lowercase : bool = False , ) -> int: SCREAMING_SNAKE_CASE__ : Dict =mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
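A hand-worked instance of the per-class IoU definition that the metric above implements, as a standalone numpy check:

import numpy as np

pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
# class 1: two pixels overlap, three pixels are covered by either map
intersection = np.logical_and(pred == 1, gt == 1).sum()
union = np.logical_or(pred == 1, gt == 1).sum()
assert intersection / union == 2 / 3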
705
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
'''simple docstring''' from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase ): snake_case_ = """pixel_values""" snake_case_ = False snake_case_ = TimmBackboneConfig def __init__( self : Union[str, Any] , __lowercase : Optional[Any] , **__lowercase : Optional[int] ) -> Any: requires_backends(self , '''timm''' ) super().__init__(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(F"backbone {config.backbone} is not supported by timm." ) if hasattr(__lowercase , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) SCREAMING_SNAKE_CASE__ : Any =getattr(__lowercase , '''use_pretrained_backbone''' , __lowercase ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. SCREAMING_SNAKE_CASE__ : Optional[int] =config.out_indices if getattr(__lowercase , '''out_indices''' , __lowercase ) is not None else (-1,) SCREAMING_SNAKE_CASE__ : str =timm.create_model( config.backbone , pretrained=__lowercase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowercase , **__lowercase , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
SCREAMING_SNAKE_CASE__ : List[str] =self._backbone.return_layers SCREAMING_SNAKE_CASE__ : List[Any] ={layer['''module''']: str(__lowercase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowercase ) @classmethod def __magic_name__ ( cls : str , __lowercase : Optional[Any] , *__lowercase : str , **__lowercase : Optional[Any] ) -> Dict: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig SCREAMING_SNAKE_CASE__ : Optional[int] =kwargs.pop('''config''' , TimmBackboneConfig() ) SCREAMING_SNAKE_CASE__ : str =kwargs.pop('''use_timm_backbone''' , __lowercase ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =kwargs.pop('''num_channels''' , config.num_channels ) SCREAMING_SNAKE_CASE__ : Tuple =kwargs.pop('''features_only''' , config.features_only ) SCREAMING_SNAKE_CASE__ : str =kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) SCREAMING_SNAKE_CASE__ : Any =kwargs.pop('''out_indices''' , config.out_indices ) SCREAMING_SNAKE_CASE__ : List[Any] =TimmBackboneConfig( backbone=__lowercase , num_channels=__lowercase , features_only=__lowercase , use_pretrained_backbone=__lowercase , out_indices=__lowercase , ) return super()._from_config(__lowercase , **__lowercase ) def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> str: pass def __magic_name__ ( self : Tuple , __lowercase : Tuple , __lowercase : int=None , __lowercase : Optional[int]=None , __lowercase : List[str]=None , **__lowercase : Dict ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: SCREAMING_SNAKE_CASE__ : Optional[Any] =return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ : Optional[Any] =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ : List[Any] =output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone SCREAMING_SNAKE_CASE__ : Optional[int] =self._all_layers SCREAMING_SNAKE_CASE__ : str =self._backbone(__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self._return_layers SCREAMING_SNAKE_CASE__ : Optional[Any] =tuple(hidden_states[i] for i in self.out_indices ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self._backbone(__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : Tuple =tuple(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =tuple(__lowercase ) if hidden_states is not None else None if not return_dict: SCREAMING_SNAKE_CASE__ : Dict =(feature_maps,) if output_hidden_states: SCREAMING_SNAKE_CASE__ : Dict =output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowercase , hidden_states=__lowercase , attentions=__lowercase )
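A hedged usage sketch of the wrapper above; it assumes `timm` is installed, and `use_pretrained_backbone=False` avoids a weight download (both option names are taken from the config handling in this file):

import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone='resnet18', use_pretrained_backbone=False, out_indices=(1, 2))
model = TimmBackbone(config)
outputs = model(torch.randn(1, 3, 224, 224))
feature_maps = outputs.feature_maps  # one tensor per requested stage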
706
'''simple docstring'''
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar('T')


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # evict the least recently used key when the cache is full
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[int | str] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
665
0
'''simple docstring'''
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar('T')


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.')
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            # evict the least recently used key when the cache is full
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[int | str] = LRUCache(4)
    lru_cache.refer('A')
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer('A')
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
707
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a_ = list[list[float | int]] def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col] SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0] SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : Tuple =0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, UpperCamelCase__ ): for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col] for cola in range(UpperCamelCase__, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ ) ] def _a( UpperCamelCase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Dict =y_val SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ ) def interpolated_func(UpperCamelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCamelCase__ ) ) return interpolated_func def _a( UpperCamelCase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Any =1 while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ): x_val += 1 ret += poly(UpperCamelCase__ ) 
return ret if __name__ == "__main__": print(F'''{solution() = }''')
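The linear-solve step above can be illustrated independently: fit the degree-2 optimum polynomial through the first three values of u(n) = n^3; its first incorrect term OP(3, 4) = 58 is the value quoted in the Project Euler 101 statement. A standalone numpy sketch, not the file's own solver:

import numpy as np

# Vandermonde system for a quadratic through (1, 1), (2, 8), (3, 27)
vandermonde = np.array([[1, 1, 1], [4, 2, 1], [9, 3, 1]], dtype=float)
targets = np.array([1.0, 8.0, 27.0])
coeffs = np.linalg.solve(vandermonde, targets)  # highest degree first
assert np.allclose(coeffs, [6.0, -11.0, 6.0])   # 6n^2 - 11n + 6
assert round(6 * 4**2 - 11 * 4 + 6) == 58       # OP(3, 4), the first FIT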
665
0
'''simple docstring'''
from math import ceil, sqrt


def solution(limit: int = 1_0_0_0_0_0_0):
    '''simple docstring'''
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(F'''{solution() = }''')
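A brute-force cross-check of the counting formula above; the Project Euler 173 statement gives exactly forty-one laminae for one hundred tiles:

def brute_force_laminae(limit: int) -> int:
    # enumerate outer/hole side lengths directly; tiles used = outer**2 - hole**2
    count = 0
    for outer in range(3, limit // 4 + 2):
        for hole in range(outer - 2, 0, -2):  # same parity as outer, hole >= 1
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count


assert brute_force_laminae(100) == solution(100) == 41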
708
'''simple docstring'''


def interpolation_search(sorted_collection, item):
    '''simple docstring'''
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    '''simple docstring'''
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    '''simple docstring'''
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')

    target = 6_7
    result = interpolation_search(collection, target)
    if result is not None:
        print(F'''{target} found at positions: {result}''')
    else:
        print('Not found')
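A short usage sketch against the iterative function above; interpolation search probes proportionally to the key's value, so it works best on roughly uniformly distributed data:

sorted_values = [2, 4, 8, 16, 32, 64, 128]
assert interpolation_search(sorted_values, 16) == 3
assert interpolation_search(sorted_values, 5) is None  # absent keys return None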
665
0
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar a_ = TypeVar('T') class __SCREAMING_SNAKE_CASE ( Generic[T] ): def __init__( self : str , __lowercase : list[T] , __lowercase : Callable[[T, T], T] ) -> None: SCREAMING_SNAKE_CASE__ : Any | T =None SCREAMING_SNAKE_CASE__ : int =len(__lowercase ) SCREAMING_SNAKE_CASE__ : list[T] =[any_type for _ in range(self.N )] + arr SCREAMING_SNAKE_CASE__ : List[Any] =fnc self.build() def __magic_name__ ( self : List[Any] ) -> None: for p in range(self.N - 1 , 0 , -1 ): SCREAMING_SNAKE_CASE__ : Any =self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __magic_name__ ( self : Dict , __lowercase : int , __lowercase : T ) -> None: p += self.N SCREAMING_SNAKE_CASE__ : Dict =v while p > 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] =p // 2 SCREAMING_SNAKE_CASE__ : int =self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : int ) -> T | None: # noqa: E741 SCREAMING_SNAKE_CASE__ : Tuple =l + self.N, r + self.N SCREAMING_SNAKE_CASE__ : T | None =None while l <= r: if l % 2 == 1: SCREAMING_SNAKE_CASE__ : Tuple =self.st[l] if res is None else self.fn(__lowercase , self.st[l] ) if r % 2 == 0: SCREAMING_SNAKE_CASE__ : List[Any] =self.st[r] if res is None else self.fn(__lowercase , self.st[r] ) SCREAMING_SNAKE_CASE__ : str =(l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce a_ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2] a_ = { 0: 7, 1: 2, 2: 6, 3: -1_4, 4: 5, 5: 4, 6: 7, 7: -1_0, 8: 9, 9: 1_0, 1_0: 1_2, 1_1: 1, } a_ = SegmentTree(test_array, min) a_ = SegmentTree(test_array, max) a_ = SegmentTree(test_array, lambda a, b: a + b) def _a( ): '''simple docstring''' for i in range(len(UpperCamelCase__ ) ): for j in range(UpperCamelCase__, len(UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE__ : Optional[int] =reduce(UpperCamelCase__, test_array[i : j + 1] ) SCREAMING_SNAKE_CASE__ : Optional[Any] =reduce(UpperCamelCase__, test_array[i : j + 1] ) SCREAMING_SNAKE_CASE__ : Any =reduce(lambda UpperCamelCase__, UpperCamelCase__ : a + b, test_array[i : j + 1] ) assert min_range == min_segment_tree.query(UpperCamelCase__, UpperCamelCase__ ) assert max_range == max_segment_tree.query(UpperCamelCase__, UpperCamelCase__ ) assert sum_range == sum_segment_tree.query(UpperCamelCase__, UpperCamelCase__ ) test_all_segments() for index, value in test_updates.items(): a_ = value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
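A small sketch of the generic segment tree above (the `query`/`update` names come from the file's own demo block); range queries are inclusive on both ends, and a point update re-aggregates along the root path:

values = [5, 2, 8, 1, 9]
min_tree = SegmentTree(values, min)
assert min_tree.query(1, 3) == 1  # min of [2, 8, 1]
min_tree.update(3, 7)             # tree now represents [5, 2, 8, 7, 9]
assert min_tree.query(1, 3) == 2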
709
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @require_tf def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, 
{'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @slow @require_torch def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : str =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
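A hedged sketch of the inference API these tests exercise; the task and model id are taken from the slow tests above, while the image path is a placeholder:

from transformers import pipeline

classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
predictions = classifier('path/to/image.png', candidate_labels=['cat', 'plane', 'remote'])
# each prediction is {'score': float, 'label': str}, sorted by descending score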
665
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_longt5'] = [
        'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LongT5EncoderModel',
        'LongT5ForConditionalGeneration',
        'LongT5Model',
        'LongT5PreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_longt5'] = [
        'FlaxLongT5ForConditionalGeneration',
        'FlaxLongT5Model',
        'FlaxLongT5PreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
710
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __magic_name__ ( self : Optional[int] ) -> str: import torch SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : str =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 
51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __magic_name__ ( self : Any ) -> List[str]: import torch SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
665
0
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar('_T')


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return F"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        # push onto the input stack; get() migrates items lazily
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        # refill the output stack only when it is empty (amortized O(1))
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
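Usage sketch for the queue above (the `put`/`get` method names are restored by convention; the obfuscated source does not preserve them):

queue = QueueByTwoStacks([10, 20])
queue.put(30)
assert queue.get() == 10 and queue.get() == 20 and queue.get() == 30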
711
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_neox""" def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : Dict =hidden_act SCREAMING_SNAKE_CASE__ : str =rotary_pct SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout SCREAMING_SNAKE_CASE__ : str =classifier_dropout SCREAMING_SNAKE_CASE__ : Any =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : Any =use_cache SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
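The `_rope_scaling_validation` helper above accepts only a two-field dict; a minimal sketch using the standard `transformers` import:

from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={'type': 'linear', 'factor': 2.0})  # passes validation
# {'type': 'xpos', ...} or a factor <= 1.0 would raise ValueError: the type must be
# 'linear' or 'dynamic' and the factor a float greater than 1.0.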
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , __lowercase : int , __lowercase : List[Any]=1_00 , __lowercase : Optional[int]=13 , __lowercase : Union[str, Any]=30 , __lowercase : List[Any]=2 , __lowercase : Any=3 , __lowercase : List[Any]=True , __lowercase : Any=True , __lowercase : Optional[Any]=32 , __lowercase : Tuple=4 , __lowercase : str=4 , __lowercase : Optional[Any]=37 , __lowercase : Union[str, Any]="gelu" , __lowercase : List[str]=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : Any=10 , __lowercase : Optional[Any]=0.02 , __lowercase : Union[str, Any]=3 , __lowercase : Union[str, Any]=None , __lowercase : Tuple=[0, 1, 2, 3] , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Dict =parent SCREAMING_SNAKE_CASE__ : Dict =1_00 SCREAMING_SNAKE_CASE__ : Dict =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : str =patch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_channels SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : Any =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : Optional[int] =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : List[str] =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : str =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size SCREAMING_SNAKE_CASE__ : List[Any] =initializer_range SCREAMING_SNAKE_CASE__ : Optional[int] =scope SCREAMING_SNAKE_CASE__ : Optional[int] =out_indices SCREAMING_SNAKE_CASE__ : Any =num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE__ : Tuple =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : Dict =num_patches + 1 def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Dict =None SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : Any ) -> int: return BeitConfig( 
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __magic_name__ ( self : List[Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Any ) -> str: SCREAMING_SNAKE_CASE__ : Optional[Any] =BeitModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : str , __lowercase : List[str] , __lowercase : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str =BeitForMaskedImageModeling(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __magic_name__ ( self : Tuple , __lowercase : Optional[int] , __lowercase : Any , __lowercase : Any , __lowercase : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Tuple =self.type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] =BeitForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE__ : Tuple =1 SCREAMING_SNAKE_CASE__ : Tuple =BeitForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.num_labels SCREAMING_SNAKE_CASE__ : str =BeitForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : str =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Tuple =config_and_inputs SCREAMING_SNAKE_CASE__ : List[Any] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) snake_case_ = ( { 
"""feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Union[str, Any] =BeitModelTester(self ) SCREAMING_SNAKE_CASE__ : str =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : int ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def __magic_name__ ( self : Tuple ) -> Any: pass def __magic_name__ ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Union[str, Any] =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Dict =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[Any] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Dict ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[Any]: if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling]: continue SCREAMING_SNAKE_CASE__ : Tuple =model_class(__lowercase ) model.to(__lowercase ) model.train() SCREAMING_SNAKE_CASE__ : Optional[int] =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ).loss loss.backward() def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] 
=self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return SCREAMING_SNAKE_CASE__ : int =False SCREAMING_SNAKE_CASE__ : str =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__lowercase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue SCREAMING_SNAKE_CASE__ : Optional[int] =model_class(__lowercase ) model.gradient_checkpointing_enable() model.to(__lowercase ) model.train() SCREAMING_SNAKE_CASE__ : str =self._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =model(**__lowercase ).loss loss.backward() def __magic_name__ ( self : Union[str, Any] ) -> List[str]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[str] =_config_zero_init(__lowercase ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(config=__lowercase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @slow def __magic_name__ ( self : Dict ) -> str: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =BeitModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : List[Any] ) -> Dict: return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def __magic_name__ ( self : Any ) -> int: SCREAMING_SNAKE_CASE__ : Any =BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =self.default_image_processor SCREAMING_SNAKE_CASE__ : List[str] =prepare_img() SCREAMING_SNAKE_CASE__ : Any =image_processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase ) # prepare bool_masked_pos SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.ones((1, 1_96) , dtype=torch.bool ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] =model(pixel_values=__lowercase , bool_masked_pos=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 1_96, 81_92) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(__lowercase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __lowercase , atol=1e-2 ) ) @slow def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : int =self.default_image_processor SCREAMING_SNAKE_CASE__ : int =prepare_img() SCREAMING_SNAKE_CASE__ : int =image_processor(images=__lowercase , 
return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : str =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((1, 10_00) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor([-1.2385, -1.0987, -1.0108] ).to(__lowercase ) self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) ) SCREAMING_SNAKE_CASE__ : Any =2_81 self.assertEqual(logits.argmax(-1 ).item() , __lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Dict =BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =self.default_image_processor SCREAMING_SNAKE_CASE__ : int =prepare_img() SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : int =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : int =torch.Size((1, 2_18_41) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =torch.tensor([1.6881, -0.2787, 0.5901] ).to(__lowercase ) self.assertTrue(torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 ) ) SCREAMING_SNAKE_CASE__ : List[str] =23_96 self.assertEqual(logits.argmax(-1 ).item() , __lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =BeitImageProcessor(do_resize=__lowercase , size=6_40 , do_center_crop=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : int =Image.open(ds[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Any =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((1, 1_50, 1_60, 1_60) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: SCREAMING_SNAKE_CASE__ : int =torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=__lowercase , ) else: SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) ) @slow def __magic_name__ ( self : Dict ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) SCREAMING_SNAKE_CASE__ : 
Optional[int] =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : str =BeitImageProcessor(do_resize=__lowercase , size=6_40 , do_center_crop=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) SCREAMING_SNAKE_CASE__ : List[Any] =Image.open(ds[0]['''file'''] ) SCREAMING_SNAKE_CASE__ : Optional[int] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(5_00, 3_00)] ) SCREAMING_SNAKE_CASE__ : Any =torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =image_processor.post_process_semantic_segmentation(outputs=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Size((1_60, 1_60) ) self.assertEqual(segmentation[0].shape , __lowercase )
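The slow integration tests above reduce to a short inference recipe; the following sketch restates it outside the test harness. It assumes transformers, torch, and PIL are installed and that the public microsoft/beit-base-patch16-224 checkpoint is reachable; the image path is the test fixture used above.

import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), as checked in the test
print(model.config.id2label[logits.argmax(-1).item()])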
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
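For context, a sketch of what the `_LazyModule` wiring above gives a caller: framework-specific submodules load only when first touched. The import lines below are ordinary transformers usage, not additions to the file.

# Cheap: the lazy module resolves DebertaConfig without importing torch or TF.
from transformers.models.deberta import DebertaConfig, DebertaTokenizer

config = DebertaConfig()

# Heavy: the modeling module (and torch) is imported only on this access.
from transformers.models.deberta import DebertaModel

model = DebertaModel(config)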
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int a_ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __SCREAMING_SNAKE_CASE ( datasets.BuilderConfig ): snake_case_ = None def _a( UpperCamelCase__ : "pyspark.sql.DataFrame", UpperCamelCase__ : List[int], ): '''simple docstring''' import pyspark def generate_fn(): SCREAMING_SNAKE_CASE__ : str =df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: SCREAMING_SNAKE_CASE__ : List[str] =df_with_partition_id.select('''*''' ).where(f"part_id = {partition_id}" ).drop('''part_id''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =partition_df.collect() SCREAMING_SNAKE_CASE__ : Any =0 for row in rows: yield f"{partition_id}_{row_id}", row.asDict() row_id += 1 return generate_fn class __SCREAMING_SNAKE_CASE ( _BaseExamplesIterable ): def __init__( self : Optional[Any] , __lowercase : "pyspark.sql.DataFrame" , __lowercase : Optional[int]=None , ) -> Dict: SCREAMING_SNAKE_CASE__ : str =df SCREAMING_SNAKE_CASE__ : List[Any] =partition_order or range(self.df.rdd.getNumPartitions() ) SCREAMING_SNAKE_CASE__ : List[str] =_generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ) -> Tuple: yield from self.generate_examples_fn() def __magic_name__ ( self : str , __lowercase : np.random.Generator ) -> "SparkExamplesIterable": SCREAMING_SNAKE_CASE__ : str =list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__lowercase ) return SparkExamplesIterable(self.df , partition_order=__lowercase ) def __magic_name__ ( self : str , __lowercase : int , __lowercase : int ) -> "SparkExamplesIterable": SCREAMING_SNAKE_CASE__ : List[str] =self.split_shard_indices_by_worker(__lowercase , __lowercase ) return SparkExamplesIterable(self.df , partition_order=__lowercase ) @property def __magic_name__ ( self : int ) -> int: return len(self.partition_order ) class __SCREAMING_SNAKE_CASE ( datasets.DatasetBuilder ): snake_case_ = SparkConfig def __init__( self : Tuple , __lowercase : "pyspark.sql.DataFrame" , __lowercase : str = None , __lowercase : str = None , **__lowercase : List[str] , ) -> Dict: import pyspark SCREAMING_SNAKE_CASE__ : Union[str, Any] =pyspark.sql.SparkSession.builder.getOrCreate() SCREAMING_SNAKE_CASE__ : List[Any] =df SCREAMING_SNAKE_CASE__ : Dict =working_dir super().__init__( cache_dir=__lowercase , config_name=str(self.df.semanticHash() ) , **__lowercase , ) def __magic_name__ ( self : Tuple ) -> List[Any]: # Returns the path of the created file. def create_cache_and_write_probe(__lowercase : Any ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(__lowercase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: SCREAMING_SNAKE_CASE__ : List[str] =( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowercase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def __magic_name__ ( self : Optional[Any] ) -> Dict: return datasets.DatasetInfo(features=self.config.features ) def __magic_name__ ( self : str , __lowercase : datasets.download.download_manager.DownloadManager ) -> List[str]: return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __magic_name__ ( self : Any , __lowercase : Any ) -> Tuple: import pyspark def get_arrow_batch_size(__lowercase : str ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.df.count() SCREAMING_SNAKE_CASE__ : Optional[Any] =df_num_rows if df_num_rows <= 1_00 else 1_00 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. SCREAMING_SNAKE_CASE__ : List[Any] =( self.df.limit(__lowercase ) .repartition(1 ) .mapInArrow(__lowercase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) SCREAMING_SNAKE_CASE__ : Optional[int] =approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. SCREAMING_SNAKE_CASE__ : int =min(__lowercase , int(approx_total_size / max_shard_size ) ) SCREAMING_SNAKE_CASE__ : Tuple =self.df.repartition(__lowercase ) def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : str , __lowercase : int , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark SCREAMING_SNAKE_CASE__ : str =ParquetWriter if file_format == '''parquet''' else ArrowWriter SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(self._working_dir , os.path.basename(__lowercase ) ) if self._working_dir else fpath SCREAMING_SNAKE_CASE__ : Optional[int] =file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. SCREAMING_SNAKE_CASE__ : Any =self.config.features SCREAMING_SNAKE_CASE__ : Any =self._writer_batch_size SCREAMING_SNAKE_CASE__ : Tuple =self._fs.storage_options def write_arrow(__lowercase : Optional[Any] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. SCREAMING_SNAKE_CASE__ : List[Any] =pyspark.TaskContext().taskAttemptId() SCREAMING_SNAKE_CASE__ : int =next(__lowercase , __lowercase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 SCREAMING_SNAKE_CASE__ : Optional[int] =writer_class( features=__lowercase , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , ) SCREAMING_SNAKE_CASE__ : List[Any] =pa.Table.from_batches([first_batch] ) writer.write_table(__lowercase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: SCREAMING_SNAKE_CASE__ : int =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 SCREAMING_SNAKE_CASE__ : List[Any] =writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , writer_batch_size=__lowercase , storage_options=__lowercase , embed_local_files=__lowercase , ) SCREAMING_SNAKE_CASE__ : str =pa.Table.from_batches([batch] ) writer.write_table(__lowercase ) if writer._num_bytes > 0: SCREAMING_SNAKE_CASE__ : str =writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__lowercase ) ): SCREAMING_SNAKE_CASE__ : str =os.path.join(os.path.dirname(__lowercase ) , os.path.basename(__lowercase ) ) shutil.move(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( self.df.mapInArrow(__lowercase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __magic_name__ ( self : Union[str, Any] , __lowercase : "datasets.SplitGenerator" , __lowercase : str = "arrow" , __lowercase : Optional[Union[str, int]] = None , __lowercase : Optional[int] = None , **__lowercase : str , ) -> Any: self._validate_cache_dir() SCREAMING_SNAKE_CASE__ : Any =convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =not is_remote_filesystem(self._fs ) SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join if is_local else posixpath.join SCREAMING_SNAKE_CASE__ : Dict ='''-TTTTT-SSSSS-of-NNNNN''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =F"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" SCREAMING_SNAKE_CASE__ : Optional[Any] =path_join(self._output_dir , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =0 SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : List[Any] =0 SCREAMING_SNAKE_CASE__ : Dict =[] SCREAMING_SNAKE_CASE__ : Tuple =[] for task_id, content in self._prepare_split_single(__lowercase , __lowercase , __lowercase ): ( SCREAMING_SNAKE_CASE__ ) : Any =content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards 
task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =total_num_examples SCREAMING_SNAKE_CASE__ : List[Any] =total_num_bytes # should rename everything at the end logger.debug(F"Renaming {total_shards} shards." ) if total_shards > 1: SCREAMING_SNAKE_CASE__ : Dict =all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. SCREAMING_SNAKE_CASE__ : str =self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __lowercase : int , __lowercase : int , __lowercase : int , ): rename( __lowercase , fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , F"{global_shard_id:05d}" ).replace('''NNNNN''' , F"{total_shards:05d}" ) , ) SCREAMING_SNAKE_CASE__ : Dict =[] SCREAMING_SNAKE_CASE__ : Dict =0 for i in range(len(__lowercase ) ): SCREAMING_SNAKE_CASE__ : Optional[Any] =task_id_and_num_shards[i] for shard_id in range(__lowercase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__lowercase , len(__lowercase ) ).map(lambda __lowercase : _rename_shard(*__lowercase ) ).collect() else: # don't use any pattern SCREAMING_SNAKE_CASE__ : Optional[Any] =0 SCREAMING_SNAKE_CASE__ : Optional[Any] =task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , F"{shard_id:05d}" ).replace('''TTTTT''' , F"{task_id:05d}" ) , fpath.replace(__lowercase , '''''' ) , ) def __magic_name__ ( self : Union[str, Any] , __lowercase : "datasets.SplitGenerator" , ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df )
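A hedged end-to-end sketch of how the Spark builder above is normally reached, via `Dataset.from_spark`; it assumes a datasets release that ships that entry point, and the session setup and column names are illustrative.

from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(0, "hello"), (1, "world")], ["id", "text"])
ds = Dataset.from_spark(df)  # partitions are materialized into shards as above
print(ds[0])                 # e.g. {'id': 0, 'text': 'hello'}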
'''simple docstring'''

from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = F"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = F"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
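A small worked call of the solver above. The 3x3 system is illustrative and strictly diagonally dominant, so the iteration converges.

import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-5.0]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))
# approaches the exact solution [1.0, -1.0, -1.0] as `iterations` grows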
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) a_ = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : bool , __lowercase : str = None , __lowercase : list = None ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : int =os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) ) SCREAMING_SNAKE_CASE__ : Any =os.path.abspath('''examples''' ) for item in os.listdir(__lowercase ): if item not in EXCLUDE_EXAMPLES: SCREAMING_SNAKE_CASE__ : Any =os.path.join(__lowercase , __lowercase ) if os.path.isfile(__lowercase ) and ".py" in item_path: with self.subTest( tested_script=__lowercase , feature_script=__lowercase , tested_section='''main()''' if parser_only else '''training_function()''' , ): SCREAMING_SNAKE_CASE__ : Optional[Any] =compare_against_test( os.path.join(__lowercase , __lowercase ) , __lowercase , __lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] ='''\n'''.join(__lowercase ) if special_strings is not None: for string in special_strings: SCREAMING_SNAKE_CASE__ : List[str] =diff.replace(__lowercase , '''''' ) self.assertEqual(__lowercase , '''''' ) def __magic_name__ ( self : int ) -> int: self.one_complete_example('''complete_nlp_example.py''' , __lowercase ) self.one_complete_example('''complete_nlp_example.py''' , __lowercase ) def __magic_name__ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : Union[str, Any] =os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =[ ''' ''' * 16 + '''{\n\n''', ''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''', ''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''', ''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''', ''' ''' * 20 + '''"epoch": epoch,\n\n''', ''' ''' * 16 + '''},\n\n''', ''' ''' * 16 + '''step=epoch,\n''', ''' ''' * 12, ''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''', ] self.one_complete_example('''complete_cv_example.py''' , __lowercase , __lowercase , __lowercase ) self.one_complete_example('''complete_cv_example.py''' , __lowercase , __lowercase , __lowercase ) @mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = False @classmethod def __magic_name__ ( cls : Dict ) -> int: super().setUpClass() SCREAMING_SNAKE_CASE__ : List[Any] =tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : Tuple =os.path.join(cls._tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) SCREAMING_SNAKE_CASE__ : Optional[int] =['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def __magic_name__ ( cls : Union[str, Any] ) -> Union[str, Any]: 
super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __magic_name__ ( self : Dict ) -> Dict: SCREAMING_SNAKE_CASE__ : Dict =F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) ) def __magic_name__ ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[str] =F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() SCREAMING_SNAKE_CASE__ : List[Any] =run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) ) def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() SCREAMING_SNAKE_CASE__ : str =run_command(self._launch_args + testargs , return_stdout=__lowercase ) self.assertNotIn('''epoch 0:''' , __lowercase ) self.assertIn('''epoch 1:''' , __lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() SCREAMING_SNAKE_CASE__ : Union[str, Any] =run_command(self._launch_args + testargs , return_stdout=__lowercase ) if torch.cuda.is_available(): SCREAMING_SNAKE_CASE__ : Optional[int] =torch.cuda.device_count() else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =1 if num_processes > 1: self.assertNotIn('''epoch 0:''' , __lowercase ) self.assertIn('''epoch 1:''' , __lowercase ) else: self.assertIn('''epoch 0:''' , __lowercase ) self.assertIn('''epoch 1:''' , __lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[Any] =''' examples/by_feature/cross_validation.py --num_folds 2 '''.split() with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ): SCREAMING_SNAKE_CASE__ : List[Any] =run_command(self._launch_args + testargs , return_stdout=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =re.findall('''({.+})''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =[r for r in results if '''accuracy''' in r][-1] SCREAMING_SNAKE_CASE__ : Tuple =ast.literal_eval(__lowercase ) self.assertGreaterEqual(results['''accuracy'''] , 0.75 ) def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[str] =['''examples/by_feature/multi_process_metrics.py'''] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def __magic_name__ ( self : Tuple ) -> str: with tempfile.TemporaryDirectory() as tmpdir: SCREAMING_SNAKE_CASE__ : Optional[Any] =F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__lowercase , '''tracking''' ) ) ) def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict =['''examples/by_feature/gradient_accumulation.py'''] run_command(self._launch_args + testargs ) def __magic_name__ ( self : Union[str, Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : str =['''examples/by_feature/local_sgd.py'''] run_command(self._launch_args + testargs )
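The tests above shell out to the `accelerate launch` CLI; run outside the harness, one checkpointing test corresponds to roughly the following sketch. The config file and output directory are placeholders (the tests generate theirs with `write_basic_config` in a temp dir).

import subprocess

subprocess.run(
    [
        "accelerate", "launch", "--config_file", "default_config.yml",
        "examples/by_feature/checkpointing.py",
        "--checkpointing_steps", "epoch",
        "--output_dir", "/tmp/out",
    ],
    check=True,
)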
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' with open(UpperCamelCase__ ) as metadata_file: SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module'''] # Load the entity vocab file SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer''' with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0] SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name] SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self." 
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias'''] SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key] else: SCREAMING_SNAKE_CASE__ : Any =state_dict[key] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' ) SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9) SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" 
{expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.''' SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist() SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE__ : Dict =[ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]'''] SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Optional[int] ={} for entry in data: SCREAMING_SNAKE_CASE__ : Tuple =entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE__ : str =entity_id break SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}" SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id return new_mapping if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = """biogpt"""

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
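A minimal sketch of using the config class above; it assumes the module sits inside the transformers package, and the override values are illustrative.

config = __SCREAMING_SNAKE_CASE()  # defaults mirror microsoft/biogpt: 42384-token vocab, 24 layers
small = __SCREAMING_SNAKE_CASE(num_hidden_layers=6, hidden_size=256, num_attention_heads=8)
print(config.hidden_size, small.num_hidden_layers)  # 1024 6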
'''simple docstring'''

def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into string `b` by upper-casing
    some of its lowercase letters and deleting all remaining lowercase letters.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
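Two illustrative checks of the abbreviation rule implemented above: uppercase some subset of the lowercase letters in `a`, then delete all remaining lowercase letters.

assert abbr("daBcd", "ABC") is True   # capitalize 'a' and 'c', drop both 'd's
assert abbr("dBcd", "ABC") is False   # no 'a' is available to become 'A'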
'''simple docstring'''

from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__( self, ints: Iterable[int] ) -> None:
        self.head: Node | None = None
        # Insert in descending order so the smallest value ends up at the head.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__( self ) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__( self ) -> int:
        return sum(1 for _ in self)

    def __str__( self ) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
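A short illustrative run of the module above; the expected string follows from inserting the values of both sample tuples in ascending order.

merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
assert len(merged) == 16
assert str(merged) == (
    "-11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10"
)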
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length SCREAMING_SNAKE_CASE__ : Dict =is_training SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids SCREAMING_SNAKE_CASE__ : List[Any] =use_labels SCREAMING_SNAKE_CASE__ : int =vocab_size SCREAMING_SNAKE_CASE__ : str =hidden_size SCREAMING_SNAKE_CASE__ : Any =embedding_size SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers SCREAMING_SNAKE_CASE__ : str =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range SCREAMING_SNAKE_CASE__ : str =num_labels SCREAMING_SNAKE_CASE__ : List[str] =num_choices SCREAMING_SNAKE_CASE__ : List[str] =scope def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : int =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Optional[int] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : List[str] ) -> Any: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , 
__lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =self.num_choices SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[Any] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__ ( self : str ) -> Any: 
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True # test_resize_embeddings = False snake_case_ = False def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str ) -> Dict: self.config_tester.run_common_tests() def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Any: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase ) def __magic_name__ ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase ) def __magic_name__ ( self : Dict ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase ) def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase ) def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' return torch.tensor( UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase ) model.to(__lowercase ) model.half() SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0] SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj] SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj] SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase ) self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
665
0
'''simple docstring''' from random import shuffle import tensorflow as tf from numpy import array def _a( UpperCamelCase__ : int, UpperCamelCase__ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =int(UpperCamelCase__ ) assert noofclusters < len(UpperCamelCase__ ) # Find out the dimensionality SCREAMING_SNAKE_CASE__ : List[str] =len(vectors[0] ) # Will help select random centroids from among the available vectors SCREAMING_SNAKE_CASE__ : Optional[Any] =list(range(len(UpperCamelCase__ ) ) ) shuffle(UpperCamelCase__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. SCREAMING_SNAKE_CASE__ : Any =tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION SCREAMING_SNAKE_CASE__ : int =tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points SCREAMING_SNAKE_CASE__ : Dict =[ tf.Variable(vectors[vector_indices[i]] ) for i in range(UpperCamelCase__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values SCREAMING_SNAKE_CASE__ : str =tf.placeholder('''float64''', [dim] ) SCREAMING_SNAKE_CASE__ : Any =[] for centroid in centroids: cent_assigns.append(tf.assign(UpperCamelCase__, UpperCamelCase__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) SCREAMING_SNAKE_CASE__ : Tuple =[tf.Variable(0 ) for i in range(len(UpperCamelCase__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value SCREAMING_SNAKE_CASE__ : List[str] =tf.placeholder('''int32''' ) SCREAMING_SNAKE_CASE__ : List[str] =[] for assignment in assignments: cluster_assigns.append(tf.assign(UpperCamelCase__, UpperCamelCase__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input SCREAMING_SNAKE_CASE__ : str =tf.placeholder('''float''', [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors SCREAMING_SNAKE_CASE__ : List[str] =tf.reduce_mean(UpperCamelCase__, 0 ) ##Node for computing Euclidean distances # Placeholders for input SCREAMING_SNAKE_CASE__ : Any =tf.placeholder('''float''', [dim] ) SCREAMING_SNAKE_CASE__ : List[str] =tf.placeholder('''float''', [dim] ) SCREAMING_SNAKE_CASE__ : Any =tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(UpperCamelCase__, UpperCamelCase__ ), 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.placeholder('''float''', [noofclusters] ) SCREAMING_SNAKE_CASE__ : str =tf.argmin(UpperCamelCase__, 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. SCREAMING_SNAKE_CASE__ : List[str] =tf.initialize_all_variables() # Initialize all variables sess.run(UpperCamelCase__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. 
SCREAMING_SNAKE_CASE__ : Dict =1_0_0 for _ in range(UpperCamelCase__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. # Iterate over each vector for vector_n in range(len(UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE__ : Tuple =vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. SCREAMING_SNAKE_CASE__ : Union[str, Any] =[ sess.run(UpperCamelCase__, feed_dict={va: vect, va: sess.run(UpperCamelCase__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input SCREAMING_SNAKE_CASE__ : int =sess.run( UpperCamelCase__, feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n], feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(UpperCamelCase__ ): # Collect all the vectors assigned to this cluster SCREAMING_SNAKE_CASE__ : List[Any] =[ vectors[i] for i in range(len(UpperCamelCase__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location SCREAMING_SNAKE_CASE__ : int =sess.run( UpperCamelCase__, feed_dict={mean_input: array(UpperCamelCase__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n], feed_dict={centroid_value: new_location} ) # Return centroids and assignments SCREAMING_SNAKE_CASE__ : Tuple =sess.run(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =sess.run(UpperCamelCase__ ) return centroids, assignments
717
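Note: the row above runs k-means with retired TensorFlow 1.x APIs (tf.Session, tf.sub, tf.initialize_all_variables), and the dataset's digit-stripped renaming collapses the two distance placeholders (presumably v1 and v2) into the single name va. A minimal NumPy sketch of the same expectation-maximization loop, assuming that is what the obfuscated names stood for:

import numpy as np


def kmeans(vectors: np.ndarray, k: int, iterations: int = 100, seed: int = 0):
    # vectors: (n, dim) array; returns (centroids, assignments).
    rng = np.random.default_rng(seed)
    centroids = vectors[rng.choice(len(vectors), size=k, replace=False)].astype(float)
    assignments = np.zeros(len(vectors), dtype=int)
    for _ in range(iterations):
        # Expectation step: assign each vector to its nearest centroid (Euclidean).
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization step: move each centroid to the mean of its members.
        for c in range(k):
            members = vectors[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments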
'''simple docstring''' import inspect import os import sys import unittest import accelerate from accelerate.test_utils import execute_subprocess_async, require_tpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) SCREAMING_SNAKE_CASE__ : Dict =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] ) @require_tpu def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Any =F"\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n ".split() SCREAMING_SNAKE_CASE__ : List[str] =[sys.executable] + distributed_args execute_subprocess_async(__lowercase , env=os.environ.copy() )
665
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
718
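Note: the row above uses the transformers lazy-import layout, where _import_structure maps submodules to their public symbols and _LazyModule defers the heavy imports until an attribute is first touched. A minimal sketch of the underlying idea via PEP 562 module-level __getattr__, meant for a package __init__.py; the real _LazyModule additionally handles dir(), repeated lookups, and pickling:

import importlib

# Hypothetical single-entry structure, mirroring the shape used above.
_import_structure = {"processing_altclip": ["AltCLIPProcessor"]}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name: str):
    # Import the submodule only when one of its symbols is first requested.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    return getattr(importlib.import_module(f".{module_name}", __name__), name)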
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = ShapEImgaImgPipeline snake_case_ = ["""image"""] snake_case_ = ["""image"""] snake_case_ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[Any] ) -> List[Any]: return 32 @property def __magic_name__ ( self : List[str] ) -> Optional[int]: return 32 @property def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: return self.time_input_dim * 4 @property def __magic_name__ ( self : Dict ) -> Union[str, Any]: return 8 @property def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor( crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __magic_name__ ( self : List[str] ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : str ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase ) return model def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.dummy_prior SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler( beta_schedule='''exp''' , 
num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , ) SCREAMING_SNAKE_CASE__ : Any ={ '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any: SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : Any ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : int ='''cpu''' SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] =np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __magic_name__ ( self : List[Any] ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu''' SCREAMING_SNAKE_CASE__ : Optional[Any] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , ) def __magic_name__ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =1 SCREAMING_SNAKE_CASE__ : List[str] =2 SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase ) for key in inputs.keys(): if key in self.batch_params: SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]] SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Optional[Any] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) SCREAMING_SNAKE_CASE__ : Dict =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) SCREAMING_SNAKE_CASE__ : List[Any] 
=ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =pipe( __lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
'''simple docstring''' import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''', [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ], ) def _a( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : Dict, UpperCamelCase__ : Dict, UpperCamelCase__ : List[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict, UpperCamelCase__ : List[str], ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any ={ '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } SCREAMING_SNAKE_CASE__ : List[Any] =input_paths_and_base_extractors[compression_format] if input_path is None: SCREAMING_SNAKE_CASE__ : Dict =f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase__ ) assert base_extractor.is_extractable(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(UpperCamelCase__, UpperCamelCase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name SCREAMING_SNAKE_CASE__ : List[Any] =file_path.read_text(encoding='''utf-8''' ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =output_path.read_text(encoding='''utf-8''' ) SCREAMING_SNAKE_CASE__ : Dict =text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''', [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ], ) def _a( UpperCamelCase__ : Dict, UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[int], UpperCamelCase__ : int, UpperCamelCase__ : Dict, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[str], ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any ={ '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } SCREAMING_SNAKE_CASE__ : Optional[int] =input_paths[compression_format] if input_path is None: SCREAMING_SNAKE_CASE__ : str =f"for '{compression_format}' compression_format, " if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += 
require_zstandard.kwargs["reason"] pytest.skip(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Dict =Extractor.infer_extractor_format(UpperCamelCase__ ) assert extractor_format is not None SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name SCREAMING_SNAKE_CASE__ : str =file_path.read_text(encoding='''utf-8''' ) else: SCREAMING_SNAKE_CASE__ : Optional[int] =output_path.read_text(encoding='''utf-8''' ) SCREAMING_SNAKE_CASE__ : int =text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Any ): '''simple docstring''' import tarfile SCREAMING_SNAKE_CASE__ : Any =tmp_path / '''data_dot_dot''' directory.mkdir() SCREAMING_SNAKE_CASE__ : Union[str, Any] =directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(UpperCamelCase__, '''w''' ) as f: f.add(UpperCamelCase__, arcname=os.path.join('''..''', text_file.name ) ) return path @pytest.fixture def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' import tarfile SCREAMING_SNAKE_CASE__ : Optional[int] =tmp_path / '''data_sym_link''' directory.mkdir() SCREAMING_SNAKE_CASE__ : List[Any] =directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''', directory / '''subdir''', target_is_directory=UpperCamelCase__ ) with tarfile.TarFile(UpperCamelCase__, '''w''' ) as f: f.add(str(directory / '''subdir''' ), arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''', [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')], ) def _a( UpperCamelCase__ : str, UpperCamelCase__ : int, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] ={ '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } SCREAMING_SNAKE_CASE__ : List[str] =insecure_tar_files[insecure_tar_file] SCREAMING_SNAKE_CASE__ : Tuple =tmp_path / '''extracted''' TarExtractor.extract(UpperCamelCase__, UpperCamelCase__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict =tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 SCREAMING_SNAKE_CASE__ : Dict =( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(UpperCamelCase__ ) assert zipfile.is_zipfile(str(UpperCamelCase__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(UpperCamelCase__ ) # but we're right
719
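Note: the tests above pick an extractor per compression format and also exercise Extractor.infer_extractor_format (the final zip case shows why suffix checks alone mislead). As a rough illustration only, a signature-based sniffing sketch; the magic numbers are the standard ones for these formats, but the real datasets Extractor performs its own, more thorough detection:

from __future__ import annotations

from pathlib import Path

MAGIC_NUMBERS = {
    b"\x1f\x8b": "gzip",
    b"PK\x03\x04": "zip",
    b"\xfd7zXZ\x00": "xz",
    b"(\xb5/\xfd": "zstd",
    b"7z\xbc\xaf\x27\x1c": "7z",
}


def infer_format(path: Path) -> str | None:
    # Read just enough of the header to match the longest signature.
    head = path.open("rb").read(8)
    return next(
        (fmt for magic, fmt in MAGIC_NUMBERS.items() if head.startswith(magic)), None
    )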
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_bigcode""" snake_case_ = ["""past_key_values"""] snake_case_ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , __lowercase : Any=5_02_57 , __lowercase : int=10_24 , __lowercase : List[str]=7_68 , __lowercase : Optional[int]=12 , __lowercase : Dict=12 , __lowercase : List[str]=None , __lowercase : int="gelu_pytorch_tanh" , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Optional[Any]=1e-5 , __lowercase : List[str]=0.02 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=5_02_56 , __lowercase : List[Any]=5_02_56 , __lowercase : Union[str, Any]=True , __lowercase : List[str]=True , __lowercase : Dict=True , **__lowercase : List[Any] , ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_positions SCREAMING_SNAKE_CASE__ : Dict =n_embd SCREAMING_SNAKE_CASE__ : Dict =n_layer SCREAMING_SNAKE_CASE__ : Union[str, Any] =n_head SCREAMING_SNAKE_CASE__ : List[str] =n_inner SCREAMING_SNAKE_CASE__ : List[str] =activation_function SCREAMING_SNAKE_CASE__ : List[Any] =resid_pdrop SCREAMING_SNAKE_CASE__ : List[Any] =embd_pdrop SCREAMING_SNAKE_CASE__ : List[str] =attn_pdrop SCREAMING_SNAKE_CASE__ : Dict =layer_norm_epsilon SCREAMING_SNAKE_CASE__ : List[str] =initializer_range SCREAMING_SNAKE_CASE__ : List[Any] =scale_attn_weights SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache SCREAMING_SNAKE_CASE__ : Dict =attention_softmax_in_fpaa SCREAMING_SNAKE_CASE__ : int =scale_attention_softmax_in_fpaa SCREAMING_SNAKE_CASE__ : Dict =multi_query SCREAMING_SNAKE_CASE__ : Optional[Any] =bos_token_id SCREAMING_SNAKE_CASE__ : Optional[Any] =eos_token_id super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase )
665
0
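Note: usage sketch for a config like the style-context row above. Keyword overrides land on the GPT-2-style names (n_layer, n_head, ...) and attribute_map aliases the generic names onto them; assumes a transformers version that ships GPT-BigCode:

from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_layer=24, n_head=16, multi_query=True)
assert config.num_hidden_layers == 24          # resolved through attribute_map
assert config.num_attention_heads == config.n_head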
'''simple docstring''' import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __init__( self : str , *__lowercase : int , **__lowercase : List[Any] ) -> None: warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , __lowercase , ) super().__init__(*__lowercase , **__lowercase )
720
'''simple docstring''' class __SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : Union[str, Any] =size SCREAMING_SNAKE_CASE__ : List[Any] =[0] * size SCREAMING_SNAKE_CASE__ : str =[0] * size @staticmethod def __magic_name__ ( __lowercase : int ) -> int: return index | (index + 1) @staticmethod def __magic_name__ ( __lowercase : int ) -> int: return (index & (index + 1)) - 1 def __magic_name__ ( self : Dict , __lowercase : int , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : List[str] =value while index < self.size: SCREAMING_SNAKE_CASE__ : Any =self.get_prev(__lowercase ) + 1 if current_left_border == index: SCREAMING_SNAKE_CASE__ : List[str] =value else: SCREAMING_SNAKE_CASE__ : str =max(__lowercase , __lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_next(__lowercase ) def __magic_name__ ( self : Optional[int] , __lowercase : int , __lowercase : int ) -> int: right -= 1 # Because right is exclusive SCREAMING_SNAKE_CASE__ : str =0 while left <= right: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_prev(__lowercase ) if left <= current_left: SCREAMING_SNAKE_CASE__ : List[Any] =max(__lowercase , self.tree[right] ) SCREAMING_SNAKE_CASE__ : Any =current_left else: SCREAMING_SNAKE_CASE__ : Optional[Any] =max(__lowercase , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
665
0
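Note: the style-context row above is a Fenwick-style structure for point update and range-max query (get_next/get_prev walk the implicit tree). As a clearly-correct reference for the same contract rather than the row's exact mechanics, a minimal iterative segment tree; like the row, it assumes non-negative values because the running maximum starts at 0:

class MaxSegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.tree = [0] * (2 * size)  # leaves live at tree[size:2*size]

    def update(self, index: int, value: int) -> None:
        i = index + self.size
        self.tree[i] = value
        while i > 1:  # recompute ancestors up to the root
            i //= 2
            self.tree[i] = max(self.tree[2 * i], self.tree[2 * i + 1])

    def query(self, left: int, right: int) -> int:
        # Maximum over the half-open range [left, right), as in the row above.
        result = 0
        lo, hi = left + self.size, right + self.size
        while lo < hi:
            if lo & 1:
                result = max(result, self.tree[lo])
                lo += 1
            if hi & 1:
                hi -= 1
                result = max(result, self.tree[hi])
            lo //= 2
            hi //= 2
        return result


tree = MaxSegmentTree(8)
tree.update(3, 7)
tree.update(5, 4)
assert tree.query(0, 8) == 7 and tree.query(4, 8) == 4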
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a_ = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
721
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = ["""vqvae"""] def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int: super().__init__() self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase ) def __magic_name__ ( self : List[str] ) -> int: return 50 if isinstance(self.scheduler , __lowercase ) else 10_00 @torch.no_grad() def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps() self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowercase , device=self.device , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =noise SCREAMING_SNAKE_CASE__ : List[str] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample( generator=__lowercase )[0] SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images if start_step > 0: SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowercase ): SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample'''] else: SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample'''] if isinstance(self.scheduler , __lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample'''] else: SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start] if mask_end > 0: SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' ) SCREAMING_SNAKE_CASE__ : Any =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) ) @torch.no_grad() def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowercase ) self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t] SCREAMING_SNAKE_CASE__ : int =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor: SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) ) return sin((1 - alpha) * theta ) * 
xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
665
0
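Note: the pipeline row above ends with a static slerp helper, but the dataset's digit-stripped renaming leaves both interpolation terms reading xa, collapsing the two endpoints (presumably x0 and x1) into one name. A minimal sketch of the intended spherical linear interpolation:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Interpolate along the great circle between x0 and x1; assumes the angle
    # between them is nonzero, otherwise sin(theta) divides by zero.
    theta = acos(
        float(
            torch.dot(torch.flatten(x0), torch.flatten(x1))
            / (torch.norm(x0) * torch.norm(x1))
        )
    )
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)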
import functools def _a( UpperCamelCase__ : list[int], UpperCamelCase__ : list[int] ): '''simple docstring''' if not isinstance(UpperCamelCase__, UpperCamelCase__ ) or not all(isinstance(UpperCamelCase__, UpperCamelCase__ ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(UpperCamelCase__ ) != 3 or not all(isinstance(UpperCamelCase__, UpperCamelCase__ ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(UpperCamelCase__ ) == 0: return 0 if min(UpperCamelCase__ ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(UpperCamelCase__ ) >= 3_6_6: raise ValueError('''All days elements should be less than 366''' ) SCREAMING_SNAKE_CASE__ : Dict =set(UpperCamelCase__ ) @functools.cache def dynamic_programming(UpperCamelCase__ : int ) -> int: if index > 3_6_5: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ), costs[1] + dynamic_programming(index + 7 ), costs[2] + dynamic_programming(index + 3_0 ), ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
700
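Note: the row above is the classic minimum-cost-tickets dynamic program (LeetCode 983): memoized recursion over days 1..365, choosing at each travel day the cheaper of a 1-, 7-, or 30-day pass. The same logic with hypothetical descriptive names, plus a known check case:

import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    days_set = set(days)

    @functools.cache
    def dp(day: int) -> int:
        if day > 365:
            return 0
        if day not in days_set:
            return dp(day + 1)  # no travel: skip ahead for free
        return min(
            costs[0] + dp(day + 1),   # 1-day pass
            costs[1] + dp(day + 7),   # 7-day pass
            costs[2] + dp(day + 30),  # 30-day pass
        )

    return dp(1)


assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11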
'''simple docstring''' from math import isqrt def _a( UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =[True] * max_number for i in range(2, isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2, UpperCamelCase__, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Any =False return [i for i in range(2, UpperCamelCase__ ) if is_prime[i]] def _a( UpperCamelCase__ : int = 1_0**8 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =calculate_prime_numbers(max_number // 2 ) SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F'''{solution() = }''')
665
0
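Note: the style-context row above counts semiprimes p*q below 10**8 with a two-pointer sweep over primes below half the limit. A brute-force cross-check on a small limit (hypothetical names; the ten semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26):

def semiprimes_below(n: int) -> int:
    # Primes up to n // 2 suffice: if p >= 2 and p * q < n, then q < n / 2.
    primes = [
        p
        for p in range(2, n // 2 + 1)
        if all(p % d for d in range(2, int(p**0.5) + 1))
    ]
    return sum(1 for i, p in enumerate(primes) for q in primes[i:] if p * q < n)


assert semiprimes_below(30) == 10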
'''simple docstring''' from math import sqrt def _a( UpperCamelCase__ : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(sqrt(UpperCamelCase__ ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _a( UpperCamelCase__ : int = 1_0_0_0_1 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Dict =1 while count != nth and number < 3: number += 1 if is_prime(UpperCamelCase__ ): count += 1 while count != nth: number += 2 if is_prime(UpperCamelCase__ ): count += 1 return number if __name__ == "__main__": print(F'''{solution() = }''')
701
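Note: the row above pairs 6k +/- 1 trial division with a counting loop for Project Euler problem 7 (both functions are obfuscated to _a in the row). A self-contained restatement of the primality test with a sanity check; counting upward with it yields 13 as the 6th prime and 104743 as the 10001st:

from math import sqrt


def is_prime(number: int) -> bool:
    # Every prime > 3 has the form 6k +/- 1, so only those divisors are tried.
    if 1 < number < 4:
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]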
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = SpeechTaTokenizer snake_case_ = False snake_case_ = True def __magic_name__ ( self : int ) -> Any: super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test''' SCREAMING_SNAKE_CASE__ : int ='''this is a test''' return input_text, output_text def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase ) return text, ids def __magic_name__ ( self : Dict ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>''' SCREAMING_SNAKE_CASE__ : Optional[int] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__lowercase ) , 81 ) def __magic_name__ ( self : Dict ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Any =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) 
self.assertEqual(__lowercase , all_size + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : int =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) self.assertEqual(__lowercase , all_size_a + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __magic_name__ ( self : Optional[Any] ) -> Any: pass def __magic_name__ ( self : List[str] ) -> List[Any]: pass def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase ) # fmt: off self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def __magic_name__ ( self : List[str] ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. 
SCREAMING_SNAKE_CASE__ : List[Any] =[ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off SCREAMING_SNAKE_CASE__ : str ={ '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
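# ---------------------------------------------------------------------------
# Minimal usage sketch for the character-level tokenizer exercised above.
# Illustrative only: it mirrors the import used in this test module and assumes
# the 'microsoft/speecht5_asr' checkpoint referenced in the slow integration
# test is reachable; the printed ids are examples, not asserted values.
from transformers.models.speechta import SpeechTaTokenizer

tokenizer = SpeechTaTokenizer.from_pretrained("microsoft/speecht5_asr")
encoded = tokenizer("this is a test")  # character-level pieces, words separated by SPIECE_UNDERLINE
decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=True)
print(encoded["input_ids"])
print(decoded)  # the round trip should recover the input text
# ---------------------------------------------------------------------------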
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} a_ = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } a_ = { 'allenai/longformer-base-4096': 4_0_9_6, 'allenai/longformer-large-4096': 4_0_9_6, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_0_9_6, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_0_9_6, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _a( ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str =( list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) ) ) SCREAMING_SNAKE_CASE__ : List[str] =bs[:] SCREAMING_SNAKE_CASE__ : Any =0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase__ ) cs.append(2**8 + n ) n += 1 SCREAMING_SNAKE_CASE__ : int =[chr(UpperCamelCase__ ) for n in cs] return dict(zip(UpperCamelCase__, UpperCamelCase__ ) ) def _a( UpperCamelCase__ : Optional[Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =set() SCREAMING_SNAKE_CASE__ : Optional[Any] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) SCREAMING_SNAKE_CASE__ : int =char return pairs class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : List[Any]="replace" , __lowercase : Tuple="<s>" , __lowercase : List[Any]="</s>" , __lowercase : Any="</s>" , __lowercase : Dict="<s>" , __lowercase : Dict="<unk>" , __lowercase : Optional[Any]="<pad>" , __lowercase : Optional[int]="<mask>" , __lowercase : Union[str, Any]=False , **__lowercase : 
str , ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token SCREAMING_SNAKE_CASE__ : int =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ : Dict =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token super().__init__( errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) with open(__lowercase , encoding='''utf-8''' ) as vocab_handle: SCREAMING_SNAKE_CASE__ : str =json.load(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] ={v: k for k, v in self.encoder.items()} SCREAMING_SNAKE_CASE__ : Dict =errors # how to handle errors in decoding SCREAMING_SNAKE_CASE__ : Optional[int] =bytes_to_unicode() SCREAMING_SNAKE_CASE__ : List[str] ={v: k for k, v in self.byte_encoder.items()} with open(__lowercase , encoding='''utf-8''' ) as merges_handle: SCREAMING_SNAKE_CASE__ : int =merges_handle.read().split('''\n''' )[1:-1] SCREAMING_SNAKE_CASE__ : Optional[int] =[tuple(merge.split() ) for merge in bpe_merges] SCREAMING_SNAKE_CASE__ : Union[str, Any] =dict(zip(__lowercase , range(len(__lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ={} SCREAMING_SNAKE_CASE__ : Optional[Any] =add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions SCREAMING_SNAKE_CASE__ : Optional[Any] =re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def __magic_name__ ( self : Optional[Any] ) -> List[Any]: return len(self.encoder ) def __magic_name__ ( self : Optional[int] ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> Tuple: if token in self.cache: return self.cache[token] SCREAMING_SNAKE_CASE__ : Any =tuple(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_pairs(__lowercase ) if not pairs: return token while True: SCREAMING_SNAKE_CASE__ : Any =min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break SCREAMING_SNAKE_CASE__ : Optional[Any] =bigram SCREAMING_SNAKE_CASE__ : List[str] =[] SCREAMING_SNAKE_CASE__ : List[Any] =0 while i < len(__lowercase ): try: SCREAMING_SNAKE_CASE__ : Any =word.index(__lowercase , __lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) SCREAMING_SNAKE_CASE__ : Optional[int] =j if 
word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 SCREAMING_SNAKE_CASE__ : Tuple =tuple(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =new_word if len(__lowercase ) == 1: break else: SCREAMING_SNAKE_CASE__ : Optional[int] =get_pairs(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =''' '''.join(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =word return word def __magic_name__ ( self : Dict , __lowercase : List[Any] ) -> Any: SCREAMING_SNAKE_CASE__ : Any =[] for token in re.findall(self.pat , __lowercase ): SCREAMING_SNAKE_CASE__ : List[str] =''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(''' ''' ) ) return bpe_tokens def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] ) -> Optional[int]: return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def __magic_name__ ( self : Tuple , __lowercase : Dict ) -> Any: return self.decoder.get(__lowercase ) def __magic_name__ ( self : Any , __lowercase : str ) -> Any: SCREAMING_SNAKE_CASE__ : List[Any] =''''''.join(__lowercase ) SCREAMING_SNAKE_CASE__ : int =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def __magic_name__ ( self : Optional[int] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE__ : Dict =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__ : Dict =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' ) SCREAMING_SNAKE_CASE__ : Optional[int] =0 with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." 
''' Please check that the tokenizer is not corrupted!''' ) SCREAMING_SNAKE_CASE__ : Dict =token_index writer.write(''' '''.join(__lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file def __magic_name__ ( self : Any , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : str =[self.cls_token_id] SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __magic_name__ ( self : List[str] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : Tuple =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : Optional[int]=False , **__lowercase : List[str] ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[int] =kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()): SCREAMING_SNAKE_CASE__ : int =''' ''' + text return (text, kwargs)
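# ---------------------------------------------------------------------------
# Minimal sketch of the byte-level BPE tokenizer implemented above. This
# assumes the public LongformerTokenizer export corresponds to the class
# defined in this module and that the 'allenai/longformer-base-4096' vocab and
# merges files listed in the pretrained map are reachable.
from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# Byte-level BPE is whitespace sensitive: a leading space changes the first token,
# which is why the class exposes the add_prefix_space option.
print(tok.tokenize("Hello world"))   # e.g. ['Hello', 'Ġworld']
print(tok.tokenize(" Hello world"))  # e.g. ['ĠHello', 'Ġworld']
ids = tok("Hello world")["input_ids"]  # <s> ... </s> added by build_inputs_with_special_tokens
print(tok.decode(ids))
# ---------------------------------------------------------------------------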
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =num_channels SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_frames SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_type SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Any =scope SCREAMING_SNAKE_CASE__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : int =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels return config def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) # verify the logits shape SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester( self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int: SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : List[Any] ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ) -> Optional[int]: pass def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[str]: if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : str =False SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : List[Any] =True SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self : Tuple ) -> List[Any]: def check_hidden_states_output(__lowercase : Tuple , 
__lowercase : Dict , __lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Tuple =prepare_video() SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
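# ---------------------------------------------------------------------------
# Minimal inference sketch matching the integration test above. It assumes the
# 'facebook/timesformer-base-finetuned-k400' checkpoint used there and a video
# supplied as a list of 8 HxWx3 uint8 numpy frames (prepare_video() above
# yields such a list); a synthetic video stands in here.
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 400) for Kinetics-400
print(model.config.id2label[int(logits.argmax(-1))])
# ---------------------------------------------------------------------------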
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Any , __lowercase : Dict , __lowercase : Optional[int]=7 , __lowercase : Dict=3 , __lowercase : Optional[Any]=18 , __lowercase : int=30 , __lowercase : Dict=4_00 , __lowercase : Any=True , __lowercase : Optional[Any]=None , __lowercase : Optional[int]=True , __lowercase : Optional[int]=None , __lowercase : str=True , ) -> str: SCREAMING_SNAKE_CASE__ : List[Any] =size if size is not None else {'''shortest_edge''': 20} SCREAMING_SNAKE_CASE__ : Optional[Any] =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} SCREAMING_SNAKE_CASE__ : List[Any] =parent SCREAMING_SNAKE_CASE__ : int =batch_size SCREAMING_SNAKE_CASE__ : Any =num_channels SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[str] =min_resolution SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_resolution SCREAMING_SNAKE_CASE__ : Tuple =do_resize SCREAMING_SNAKE_CASE__ : str =size SCREAMING_SNAKE_CASE__ : int =do_center_crop SCREAMING_SNAKE_CASE__ : str =crop_size SCREAMING_SNAKE_CASE__ : Optional[int] =do_flip_channel_order def __magic_name__ ( self : str ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = MobileViTImageProcessor if is_vision_available() else None def __magic_name__ ( self : Union[str, Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[Any] =MobileViTImageProcessingTester(self ) @property def __magic_name__ ( self : Union[str, Any] ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : List[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__lowercase , '''center_crop''' ) ) self.assertTrue(hasattr(__lowercase , '''do_flip_channel_order''' ) ) def __magic_name__ ( self : Tuple ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) SCREAMING_SNAKE_CASE__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __magic_name__ ( self : List[Any] ) -> Union[str, Any]: pass def __magic_name__ ( self : Any ) -> Optional[Any]: # Initialize image_processing SCREAMING_SNAKE_CASE__ : Any =self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE__ : List[str] 
=prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE__ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : Optional[int] =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__ ( self : Dict ) -> List[Any]: # Initialize image_processing SCREAMING_SNAKE_CASE__ : Dict =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE__ : Optional[int] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : Optional[int] =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__ ( self : str ) -> Dict: # Initialize image_processing SCREAMING_SNAKE_CASE__ : Optional[int] =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE__ : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE__ : str =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched SCREAMING_SNAKE_CASE__ : Any =image_processing(__lowercase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
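# ---------------------------------------------------------------------------
# Minimal sketch of the preprocessing pipeline tested above: resize the short
# edge, center-crop, and flip the channel order (RGB -> BGR). The constructor
# arguments are exactly the ones the tests exercise; the input image is
# synthetic, so the sketch only demonstrates the output shape.
import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(
    do_resize=True,
    size={"shortest_edge": 20},
    do_center_crop=True,
    crop_size={"height": 18, "width": 18},
    do_flip_channel_order=True,
)
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])
# ---------------------------------------------------------------------------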
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } a_ = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } a_ = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = BertTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]: super().__init__( 
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__ : Any =do_lower_case SCREAMING_SNAKE_CASE__ : Any =strip_accents SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
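# ---------------------------------------------------------------------------
# Minimal sketch of the fast BERT tokenizer defined above, showing the
# [CLS] A [SEP] B [SEP] layout from build_inputs_with_special_tokens and the
# 0/1 segment ids from create_token_type_ids_from_sequences. Assumes the
# public BertTokenizerFast export and the 'bert-base-uncased' files listed in
# the pretrained vocab map.
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("how are you?", "I am fine.")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# e.g. ['[CLS]', 'how', 'are', 'you', '?', '[SEP]', 'i', 'am', 'fine', '.', '[SEP]']
print(enc["token_type_ids"])  # 0s over segment A (incl. its [SEP]), 1s over segment B
# ---------------------------------------------------------------------------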
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __lowercase : List[str] , __lowercase : Optional[Any]=13 , __lowercase : List[str]=7 , __lowercase : Dict=True , __lowercase : str=True , __lowercase : List[str]=False , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=99 , __lowercase : Optional[int]=32 , __lowercase : Any=5 , __lowercase : List[Any]=4 , __lowercase : Optional[Any]=37 , __lowercase : Optional[Any]="gelu" , __lowercase : str=0.1 , __lowercase : List[str]=0.1 , __lowercase : Tuple=5_12 , __lowercase : Optional[Any]=16 , __lowercase : List[Any]=2 , __lowercase : List[Any]=0.02 , __lowercase : List[Any]=3 , __lowercase : Tuple=4 , __lowercase : List[Any]=None , ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : Dict =seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =is_training SCREAMING_SNAKE_CASE__ : Dict =use_input_mask SCREAMING_SNAKE_CASE__ : Tuple =use_token_type_ids SCREAMING_SNAKE_CASE__ : Optional[int] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =vocab_size SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : Any =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] =type_vocab_size SCREAMING_SNAKE_CASE__ : Tuple =type_sequence_label_size SCREAMING_SNAKE_CASE__ : str =initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] =num_labels SCREAMING_SNAKE_CASE__ : int =num_choices SCREAMING_SNAKE_CASE__ : List[str] =scope def __magic_name__ ( self : Union[str, Any] ) -> List[str]: SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Tuple =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Optional[int] =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : int =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Union[str, Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : Tuple ) -> Tuple: 
return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Union[str, Any] , __lowercase : List[str] , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : List[str] , __lowercase : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =LlamaModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , attention_mask=__lowercase ) SCREAMING_SNAKE_CASE__ : int =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , ) -> Dict: SCREAMING_SNAKE_CASE__ : int =True SCREAMING_SNAKE_CASE__ : int =LlamaModel(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase , attention_mask=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[Any] , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Any , __lowercase : str , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : List[str] , ) -> int: SCREAMING_SNAKE_CASE__ : Union[str, Any] =LlamaForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Tuple , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Any , __lowercase : int , __lowercase : Optional[Any] , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Any =LlamaForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ : Any =model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , ) SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Optional[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor((self.batch_size, 3) , 
vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : Tuple =torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ : int =torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0] SCREAMING_SNAKE_CASE__ : Any =model( __lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )['''hidden_states'''][0] # select random slice SCREAMING_SNAKE_CASE__ : str =ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ : str =output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ : Tuple =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-3 ) ) def __magic_name__ ( self : Optional[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any =self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE__ ) : Union[str, Any] =config_and_inputs SCREAMING_SNAKE_CASE__ : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () snake_case_ = (LlamaForCausalLM,) if is_torch_available() else () snake_case_ = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : str =LlamaModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : Dict ) -> str: self.config_tester.run_common_tests() def __magic_name__ ( self : List[str] ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE__ : str =type self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =3 SCREAMING_SNAKE_CASE__ : Optional[Any] =input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : Any =input_ids.ne(1 ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Any =LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : str =model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, 
self.model_tester.num_labels) ) def __magic_name__ ( self : Union[str, Any] ) -> Any: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict =3 SCREAMING_SNAKE_CASE__ : List[str] ='''single_label_classification''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : Any =input_ids.ne(1 ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Any =LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =3 SCREAMING_SNAKE_CASE__ : Tuple ='''multi_label_classification''' SCREAMING_SNAKE_CASE__ : Dict =input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : str =input_ids.ne(1 ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ : List[str] =LlamaForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , labels=__lowercase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def __magic_name__ ( self : Any ) -> str: pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def __magic_name__ ( self : Tuple , __lowercase : Optional[int] ) -> Any: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : int =ids_tensor([1, 10] , config.vocab_size ) SCREAMING_SNAKE_CASE__ : List[str] =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE__ : Tuple =LlamaModel(__lowercase ) original_model.to(__lowercase ) original_model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =original_model(__lowercase ).last_hidden_state SCREAMING_SNAKE_CASE__ : Any =original_model(__lowercase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''type''': scaling_type, '''factor''': 10.0} SCREAMING_SNAKE_CASE__ : Union[str, Any] =LlamaModel(__lowercase ) scaled_model.to(__lowercase ) scaled_model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =scaled_model(__lowercase ).last_hidden_state SCREAMING_SNAKE_CASE__ : Optional[Any] =scaled_model(__lowercase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : str =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] SCREAMING_SNAKE_CASE__ : Optional[Any] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __lowercase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __lowercase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __magic_name__ ( self : Optional[Any] ) -> List[str]: SCREAMING_SNAKE_CASE__ : str =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] SCREAMING_SNAKE_CASE__ : List[Any] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(torch.tensor(__lowercase ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __lowercase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE__ : Tuple =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __lowercase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : List[Any] =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] SCREAMING_SNAKE_CASE__ : Optional[Any] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE__ : str =model(torch.tensor(__lowercase ) ) # Expected mean on dim = -1 SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __lowercase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off SCREAMING_SNAKE_CASE__ : str =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, 
-2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __lowercase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' ) @slow def __magic_name__ ( self : int ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =[1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38] SCREAMING_SNAKE_CASE__ : List[Any] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE__ : int =model(torch.tensor(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __lowercase , atol=1e-2 , rtol=1e-2 ) # fmt: off SCREAMING_SNAKE_CASE__ : Dict =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __lowercase , atol=1e-5 , rtol=1e-5 ) @unittest.skip('''Model is currently gated''' ) @slow def __magic_name__ ( self : List[Any] ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[int] ='''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' SCREAMING_SNAKE_CASE__ : Dict ='''Simply put, the theory of relativity states that ''' SCREAMING_SNAKE_CASE__ : Optional[int] =LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) SCREAMING_SNAKE_CASE__ : Any =tokenizer.encode(__lowercase , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : Dict =LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=__lowercase ) # greedy generation outputs SCREAMING_SNAKE_CASE__ : int =model.generate(__lowercase , max_new_tokens=64 , top_p=__lowercase , temperature=1 , do_sample=__lowercase ) SCREAMING_SNAKE_CASE__ : int =tokenizer.decode(generated_ids[0] , skip_special_tokens=__lowercase ) self.assertEqual(__lowercase , __lowercase )
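# Hedged sketch of the RoPE-scaling mechanism exercised by the parameterized test above:
# only `config.rope_scaling` differs between the original and scaled runs. The small
# config values below are illustrative, not taken from the tester class.
from transformers import LlamaConfig

for scaling_type in ("linear", "dynamic"):
    config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, max_position_embeddings=512)
    config.rope_scaling = {"type": scaling_type, "factor": 10.0}
    # A LlamaModel built from this config applies scaled rotary embeddings; "dynamic"
    # scaling only changes outputs for inputs longer than max_position_embeddings.
    print(scaling_type, config.rope_scaling)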
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a_ = False a_ = False def _a( UpperCamelCase__ : Namespace ): '''simple docstring''' return TrainCommand(UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @staticmethod def __magic_name__ ( __lowercase : ArgumentParser ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to save the trained model.''' ) train_parser.add_argument( '''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' ) SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =args.output SCREAMING_SNAKE_CASE__ : str =args.column_label SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text SCREAMING_SNAKE_CASE__ : Tuple =args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": SCREAMING_SNAKE_CASE__ : List[str]
=TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from {args.train_data}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon def __magic_name__ ( self : Any ) -> str: if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : Optional[int] ) -> Tuple: raise NotImplementedError def __magic_name__ ( self : Dict ) -> List[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
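# The command above uses argparse's set_defaults(func=...) dispatch pattern. A
# self-contained sketch of that pattern with placeholder names (not part of the file
# above):
from argparse import ArgumentParser

def make_train_command(args):
    print(f"would train {args.model} on {args.train_data}")

parser = ArgumentParser()
subparsers = parser.add_subparsers()
train_parser = subparsers.add_parser("train")
train_parser.add_argument("--train_data", required=True)
train_parser.add_argument("--model", default="bert-base-uncased")
train_parser.set_defaults(func=make_train_command)

args = parser.parse_args(["train", "--train_data", "data.csv"])
args.func(args)  # dispatches to the handler registered by the chosen subcommand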
'''simple docstring''' import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =0 def __magic_name__ ( self : str ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__lowercase , __lowercase ) def __magic_name__ ( self : Optional[Any] ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : List[Any] =Path(__lowercase ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE__ : int =Path(__lowercase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(__lowercase ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE__ : List[str] =Path(__lowercase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Any =CLIPConfig() # Create a dummy config file with image_processor_type SCREAMING_SNAKE_CASE__ : Optional[int] =Path(__lowercase ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE__ : str =Path(__lowercase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained(__lowercase ).to_dict() config_dict.pop('''image_processor_type''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =CLIPImageProcessor(**__lowercase ) # save in new folder model_config.save_pretrained(__lowercase ) config.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(__lowercase ) # make sure private variable is not incorrectly saved SCREAMING_SNAKE_CASE__ : Any =json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(__lowercase , __lowercase ) def __magic_name__ (
self : Union[str, Any] ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Union[str, Any] =Path(__lowercase ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , ) SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) def __magic_name__ ( self : Optional[int] ) -> List[Any]: with self.assertRaisesRegex( __lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ): SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained('''clip-base''' ) def __magic_name__ ( self : Optional[int] ) -> List[Any]: with self.assertRaisesRegex( __lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): SCREAMING_SNAKE_CASE__ : List[Any] =AutoImageProcessor.from_pretrained(__lowercase , revision='''aaaaaa''' ) def __magic_name__ ( self : Tuple ) -> Optional[int]: with self.assertRaisesRegex( __lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): SCREAMING_SNAKE_CASE__ : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __magic_name__ ( self : Dict ) -> str: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowercase ): SCREAMING_SNAKE_CASE__ : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase ) SCREAMING_SNAKE_CASE__ : int =AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =AutoImageProcessor.from_pretrained(__lowercase , trust_remote_code=__lowercase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def __magic_name__ ( self : int ) -> Any: try: AutoConfig.register('''custom''' , __lowercase ) AutoImageProcessor.register(__lowercase , __lowercase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowercase ): AutoImageProcessor.register(__lowercase , __lowercase ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Optional[int] =Path(__lowercase ) / '''preprocessor_config.json''' SCREAMING_SNAKE_CASE__ : Dict =Path(__lowercase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowercase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowercase , '''w''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =CustomImageProcessor.from_pretrained(__lowercase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained(__lowercase ) self.assertIsInstance(__lowercase , __lowercase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __magic_name__ ( self : str ) -> str: class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = True try: AutoConfig.register('''custom''' , __lowercase ) AutoImageProcessor.register(__lowercase , __lowercase ) # If remote code is not set, the default is to use local SCREAMING_SNAKE_CASE__ : Any =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. SCREAMING_SNAKE_CASE__ : Tuple =AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub SCREAMING_SNAKE_CASE__ : str =AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowercase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__lowercase , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
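# Hedged sketch of the registration flow the tests above exercise, using placeholder
# classes rather than the test suite's CustomConfig/CustomImageProcessor:
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyImageProcessor(BaseImageProcessor):
    pass

AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# From here on, checkpoints whose config declares model_type == "my-model" resolve to
# MyImageProcessor via AutoImageProcessor.from_pretrained(...).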
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
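# Condensed, hedged restatement of the slow test's end-to-end flow. The pipeline class
# names in this dump correspond to diffusers' KandinskyV22PriorPipeline and
# KandinskyV22Img2ImgPipeline, used below; running it needs a CUDA GPU and the hub weights.
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda")
decoder = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda")

init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png")
image_embeds, negative_embeds = prior("A red cartoon frog, 4k", num_inference_steps=5, negative_prompt="").to_tuple()
# strength=0.2 keeps most of the source layout while repainting it toward the prompt
frog = decoder(image=init_image, image_embeds=image_embeds, negative_image_embeds=negative_embeds, height=768, width=768, strength=0.2, output_type="np").images[0]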
'''simple docstring''' from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = 42 @flax_register_to_config class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCamelCase , lowerCamelCase ): snake_case_ = 32 snake_case_ = 4 snake_case_ = 4 snake_case_ = ( """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""", """DownBlock2D""", ) snake_case_ = ("""UpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""") snake_case_ = False snake_case_ = (320, 640, 1280, 1280) snake_case_ = 2 snake_case_ = 8 snake_case_ = None snake_case_ = 1280 snake_case_ = 0.0 snake_case_ = False snake_case_ = jnp.floataa snake_case_ = True snake_case_ = 0 snake_case_ = False def __magic_name__ ( self : List[Any] , __lowercase : jax.random.KeyArray ) -> FrozenDict: # init input tensors SCREAMING_SNAKE_CASE__ : int =(1, self.in_channels, self.sample_size, self.sample_size) SCREAMING_SNAKE_CASE__ : List[Any] =jnp.zeros(__lowercase , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ : int =jnp.ones((1,) , dtype=jnp.intaa ) SCREAMING_SNAKE_CASE__ : List[Any] =jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ : List[str] =jax.random.split(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] ={'''params''': params_rng, '''dropout''': dropout_rng} return self.init(__lowercase , __lowercase , __lowercase , __lowercase )["params"] def __magic_name__ ( self : List[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int =self.block_out_channels SCREAMING_SNAKE_CASE__ : Optional[Any] =block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.num_attention_heads or self.attention_head_dim # input SCREAMING_SNAKE_CASE__ : Tuple =nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time SCREAMING_SNAKE_CASE__ : Optional[int] =FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) SCREAMING_SNAKE_CASE__ : Optional[int] =FlaxTimestepEmbedding(__lowercase , dtype=self.dtype ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.only_cross_attention if isinstance(__lowercase , __lowercase ): SCREAMING_SNAKE_CASE__ : List[Any] =(only_cross_attention,) * len(self.down_block_types ) if isinstance(__lowercase , __lowercase ): SCREAMING_SNAKE_CASE__ : Any =(num_attention_heads,) * len(self.down_block_types ) # down SCREAMING_SNAKE_CASE__ : Union[str, Any] =[] SCREAMING_SNAKE_CASE__ : Optional[Any] =block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): SCREAMING_SNAKE_CASE__ : List[str] =output_channel SCREAMING_SNAKE_CASE__ : Tuple =block_out_channels[i] SCREAMING_SNAKE_CASE__ : Optional[int] =i == len(__lowercase ) - 1 if down_block_type == "CrossAttnDownBlock2D": SCREAMING_SNAKE_CASE__ : Tuple =FlaxCrossAttnDownBlockaD( in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE__ : str =FlaxDownBlockaD( in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(__lowercase ) SCREAMING_SNAKE_CASE__ : int =down_blocks # mid SCREAMING_SNAKE_CASE__ : List[Any] =FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up SCREAMING_SNAKE_CASE__ : int =[] SCREAMING_SNAKE_CASE__ : Any =list(reversed(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =list(reversed(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Dict =list(reversed(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): SCREAMING_SNAKE_CASE__ : Optional[int] =output_channel SCREAMING_SNAKE_CASE__ : List[Any] =reversed_block_out_channels[i] SCREAMING_SNAKE_CASE__ : List[Any] =reversed_block_out_channels[min(i + 1 , len(__lowercase ) - 1 )] SCREAMING_SNAKE_CASE__ : Optional[int] =i == len(__lowercase ) - 1 if up_block_type == "CrossAttnUpBlock2D": SCREAMING_SNAKE_CASE__ : List[Any] =FlaxCrossAttnUpBlockaD( in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: SCREAMING_SNAKE_CASE__ : List[Any] =FlaxUpBlockaD( in_channels=__lowercase , out_channels=__lowercase , 
prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =output_channel SCREAMING_SNAKE_CASE__ : Optional[Any] =up_blocks # out SCREAMING_SNAKE_CASE__ : Optional[int] =nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) SCREAMING_SNAKE_CASE__ : str =nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : str , __lowercase : Union[str, Any]=None , __lowercase : List[str]=None , __lowercase : bool = True , __lowercase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]: # 1. time if not isinstance(__lowercase , jnp.ndarray ): SCREAMING_SNAKE_CASE__ : List[Any] =jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(__lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE__ : Optional[Any] =timesteps.astype(dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ : List[Any] =jnp.expand_dims(__lowercase , 0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.time_proj(__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =self.time_embedding(__lowercase ) # 2. pre-process SCREAMING_SNAKE_CASE__ : str =jnp.transpose(__lowercase , (0, 2, 3, 1) ) SCREAMING_SNAKE_CASE__ : Tuple =self.conv_in(__lowercase ) # 3. down SCREAMING_SNAKE_CASE__ : Tuple =(sample,) for down_block in self.down_blocks: if isinstance(__lowercase , __lowercase ): SCREAMING_SNAKE_CASE__ : str =down_block(__lowercase , __lowercase , __lowercase , deterministic=not train ) else: SCREAMING_SNAKE_CASE__ : Any =down_block(__lowercase , __lowercase , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: SCREAMING_SNAKE_CASE__ : int =() for down_block_res_sample, down_block_additional_residual in zip( __lowercase , __lowercase ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) SCREAMING_SNAKE_CASE__ : Dict =new_down_block_res_samples # 4. mid SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.mid_block(__lowercase , __lowercase , __lowercase , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: SCREAMING_SNAKE_CASE__ : List[str] =down_block_res_samples[-(self.layers_per_block + 1) :] SCREAMING_SNAKE_CASE__ : Optional[Any] =down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(__lowercase , __lowercase ): SCREAMING_SNAKE_CASE__ : Optional[int] =up_block( __lowercase , temb=__lowercase , encoder_hidden_states=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train , ) else: SCREAMING_SNAKE_CASE__ : int =up_block(__lowercase , temb=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train ) # 6. post-process SCREAMING_SNAKE_CASE__ : List[Any] =self.conv_norm_out(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =nn.silu(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.conv_out(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =jnp.transpose(__lowercase , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=__lowercase )
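# Hedged sketch of exercising the init path defined above through diffusers' released
# FlaxUNet2DConditionModel; the config values are small illustrative choices:
import jax
from diffusers import FlaxUNet2DConditionModel

unet = FlaxUNet2DConditionModel(
    sample_size=16,
    in_channels=4,
    out_channels=4,
    block_out_channels=(32, 64),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=4,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
)
params = unet.init_weights(jax.random.PRNGKey(0))  # builds dummy sample/timesteps/context internally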
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a_ = TypeVar('T') class __SCREAMING_SNAKE_CASE ( Generic[T] ): snake_case_ = 42 # Cache store of keys snake_case_ = 42 # References of the keys in cache snake_case_ = 10 # Maximum capacity of cache def __init__( self : Dict , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : Any =deque() SCREAMING_SNAKE_CASE__ : str =set() if not n: SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =n def __magic_name__ ( self : List[str] , __lowercase : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop() self.key_reference.remove(__lowercase ) else: self.dq_store.remove(__lowercase ) self.dq_store.appendleft(__lowercase ) self.key_reference.add(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> None: for k in self.dq_store: print(__lowercase ) def __repr__( self : List[Any] ) -> str: return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}" if __name__ == "__main__": import doctest doctest.testmod() a_ = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
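# Edge case worth noting in the constructor above: a falsy capacity (n == 0 or None)
# switches the cache to effectively unbounded mode via sys.maxsize, and only a negative
# n raises. Standalone restatement of that branch:
import sys

def resolve_capacity(n):
    if not n:
        return sys.maxsize
    if n < 0:
        raise ValueError("n should be an integer greater than 0.")
    return n

assert resolve_capacity(0) == sys.maxsize and resolve_capacity(4) == 4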
'''simple docstring''' import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _a( UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[str] =t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0 return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}" def _a( UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : int, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict=3_0_0 ): '''simple docstring''' return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n " def _a( UpperCamelCase__ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] ='''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: SCREAMING_SNAKE_CASE__ : List[Any] =f"{elt:.6f}" if isinstance(UpperCamelCase__, UpperCamelCase__ ) else str(UpperCamelCase__ ) html_code += f" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __SCREAMING_SNAKE_CASE : snake_case_ = 5 snake_case_ = 0.2 def __init__( self : str , __lowercase : int , __lowercase : Optional[str] = None , __lowercase : bool = True , __lowercase : Optional["NotebookTrainingTracker"] = None , __lowercase : int = 3_00 , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : List[Any] =total SCREAMING_SNAKE_CASE__ : List[str] ='''''' if prefix is None else prefix SCREAMING_SNAKE_CASE__ : Tuple =leave SCREAMING_SNAKE_CASE__ : Optional[Any] =parent SCREAMING_SNAKE_CASE__ : List[Any] =width SCREAMING_SNAKE_CASE__ : List[Any] =None SCREAMING_SNAKE_CASE__ : List[str] =None SCREAMING_SNAKE_CASE__ : str =None def __magic_name__ ( self : int , __lowercase : int , __lowercase : bool = False , __lowercase : str = None ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Tuple =value if comment is not None: SCREAMING_SNAKE_CASE__ : Tuple =comment if self.last_value is None: SCREAMING_SNAKE_CASE__ : str =time.time() SCREAMING_SNAKE_CASE__ : Union[str, Any] =value SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Any =self.warmup SCREAMING_SNAKE_CASE__ : Any =1 self.update_bar(__lowercase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 SCREAMING_SNAKE_CASE__ : Optional[int] =time.time() SCREAMING_SNAKE_CASE__ : Any =current_time - self.start_time # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value: SCREAMING_SNAKE_CASE__ : Any =self.elapsed_time / (value - self.start_value) else: SCREAMING_SNAKE_CASE__ : Dict =None if value >= self.total: SCREAMING_SNAKE_CASE__ : int =self.total SCREAMING_SNAKE_CASE__ : Optional[int] =None if not self.leave: self.close() elif self.average_time_per_item is not None: SCREAMING_SNAKE_CASE__ : List[Any] =self.average_time_per_item * (self.total - value) self.update_bar(__lowercase ) SCREAMING_SNAKE_CASE__ : int =value SCREAMING_SNAKE_CASE__ : Optional[int] =current_time if self.average_time_per_item is None: SCREAMING_SNAKE_CASE__ : List[str] =1 else: SCREAMING_SNAKE_CASE__ : int =max(int(self.update_every / self.average_time_per_item ) , 1 ) def __magic_name__ ( self : str , __lowercase : List[str] , __lowercase : Union[str, Any]=None ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =''' ''' * (len(str(self.total ) ) - len(str(__lowercase ) )) + str(__lowercase ) if self.elapsed_time is None: SCREAMING_SNAKE_CASE__ : str =F"[{spaced_value}/{self.total} : < :" elif self.predicted_remaining is None: SCREAMING_SNAKE_CASE__ : Any =F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )}" else: SCREAMING_SNAKE_CASE__ : List[str] =( F"[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <" F" {format_time(self.predicted_remaining )}" ) self.label += F", {1/self.average_time_per_item:.2f} it/s" self.label += "]" if self.comment is None or len(self.comment ) == 0 else F", {self.comment}]" self.display() def __magic_name__ ( self : Dict ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: SCREAMING_SNAKE_CASE__ : Optional[int] =disp.display(disp.HTML(self.html_code ) , display_id=__lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def __magic_name__ ( self : Dict ) -> str: if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''' ) ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __init__( self : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any]=None ) -> Optional[int]: super().__init__(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =None if column_names is None else [column_names] SCREAMING_SNAKE_CASE__ : Dict =None def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : str =html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: SCREAMING_SNAKE_CASE__ : Optional[Any] =disp.display(disp.HTML(self.html_code ) , display_id=__lowercase ) else: self.output.update(disp.HTML(self.html_code ) ) def __magic_name__ ( self : int , __lowercase : str ) -> List[str]: if self.inner_table is None: SCREAMING_SNAKE_CASE__ : List[str] =[list(values.keys() ), list(values.values() )] else: SCREAMING_SNAKE_CASE__ : Dict =self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =columns self.inner_table.append([values[c] for c in columns] ) def __magic_name__ ( self : Optional[Any] , __lowercase : str , __lowercase : Dict=None , 
__lowercase : List[Any]=3_00 ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : int =NotebookProgressBar(__lowercase , prefix=__lowercase , parent=self , width=__lowercase ) return self.child_bar def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : Any =None self.display() class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __init__( self : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : int =None SCREAMING_SNAKE_CASE__ : Optional[int] =None SCREAMING_SNAKE_CASE__ : Any =False def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] , **__lowercase : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] ='''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : List[Any] =0 SCREAMING_SNAKE_CASE__ : Tuple =[self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''' ) SCREAMING_SNAKE_CASE__ : Dict =NotebookTrainingTracker(state.max_steps , __lowercase ) def __magic_name__ ( self : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : int , **__lowercase : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Dict =int(state.epoch ) if int(state.epoch ) == state.epoch else F"{state.epoch:.2f}" self.training_tracker.update( state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , ) SCREAMING_SNAKE_CASE__ : List[str] =False def __magic_name__ ( self : Union[str, Any] , __lowercase : Dict , __lowercase : int , __lowercase : Optional[int] , __lowercase : str=None , **__lowercase : int ) -> List[str]: if not has_length(__lowercase ): return if self.prediction_bar is None: if self.training_tracker is not None: SCREAMING_SNAKE_CASE__ : List[str] =self.training_tracker.add_child(len(__lowercase ) ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =NotebookProgressBar(len(__lowercase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def __magic_name__ ( self : Dict , __lowercase : List[Any] , __lowercase : Any , __lowercase : Dict , **__lowercase : Dict ) -> Optional[int]: if self.prediction_bar is not None: self.prediction_bar.close() SCREAMING_SNAKE_CASE__ : int =None def __magic_name__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Tuple=None , **__lowercase : List[str] ) -> int: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: SCREAMING_SNAKE_CASE__ : List[Any] ={'''Training Loss''': logs['''loss''']} # First column is necessarily Step since we're not in epoch eval strategy SCREAMING_SNAKE_CASE__ : List[Any] =state.global_step self.training_tracker.write_line(__lowercase ) def __magic_name__ ( self : Optional[Any] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : str , __lowercase : List[str]=None , **__lowercase : List[str] ) -> Tuple: if self.training_tracker is not None: SCREAMING_SNAKE_CASE__ : Tuple ={'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history ): if "loss" in log: SCREAMING_SNAKE_CASE__ : Optional[Any] =log['''loss'''] break if self.first_column == "Epoch": SCREAMING_SNAKE_CASE__ : Optional[Any] =int(state.epoch ) else: SCREAMING_SNAKE_CASE__ : int =state.global_step SCREAMING_SNAKE_CASE__ : Tuple ='''eval''' for k in
metrics: if k.endswith('''_loss''' ): SCREAMING_SNAKE_CASE__ : Optional[Any] =re.sub(r'''\_loss$''' , '''''' , __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =metrics.pop('''total_flos''' , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =metrics.pop('''epoch''' , __lowercase ) SCREAMING_SNAKE_CASE__ : int =metrics.pop(F"{metric_key_prefix}_runtime" , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =metrics.pop(F"{metric_key_prefix}_samples_per_second" , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =metrics.pop(F"{metric_key_prefix}_steps_per_second" , __lowercase ) SCREAMING_SNAKE_CASE__ : int =metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , __lowercase ) for k, v in metrics.items(): if k == F"{metric_key_prefix}_loss": SCREAMING_SNAKE_CASE__ : int =v else: SCREAMING_SNAKE_CASE__ : Tuple =k.split('''_''' ) SCREAMING_SNAKE_CASE__ : Dict =''' '''.join([part.capitalize() for part in splits[1:]] ) SCREAMING_SNAKE_CASE__ : str =v self.training_tracker.write_line(__lowercase ) self.training_tracker.remove_child() SCREAMING_SNAKE_CASE__ : Union[str, Any] =None # Evaluation takes a long time so we should force the next update. SCREAMING_SNAKE_CASE__ : Optional[Any] =True def __magic_name__ ( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : int , **__lowercase : Optional[int] ) -> Tuple: self.training_tracker.update( state.global_step , comment=F"Epoch {int(state.epoch )}/{state.num_train_epochs}" , force_update=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =None
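# The time-formatting helper at the top of this file renders elapsed seconds as H:MM:SS,
# dropping the hour field when it is zero. Standalone restatement with quick checks:
def format_time(t: float) -> str:
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

assert format_time(75) == "01:15"
assert format_time(3671) == "1:01:11"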
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a_ = list[list[float | int]] def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col] SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0] SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : Tuple =0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, UpperCamelCase__ ): for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col] for cola in range(UpperCamelCase__, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ ) ] def _a( UpperCamelCase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Dict =y_val SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ ) def interpolated_func(UpperCamelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCamelCase__ ) ) return interpolated_func def _a( UpperCamelCase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Any =1 while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ): x_val += 1 ret += poly(UpperCamelCase__ ) return ret if __name__ == "__main__": print(F'''{solution() = }''')
665
0
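For readability, here is a minimal un-obfuscated sketch of the Gaussian-elimination solver that the Project Euler 101 snippet above implements (forward elimination with partial pivoting, then back substitution). The identifiers solve, augmented, and pivot_row are expansions chosen for this sketch, not names taken from the record:

from __future__ import annotations

Matrix = list[list[float]]

def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    # Solve matrix @ x = vector: forward elimination with partial pivoting,
    # then back substitution.
    size = len(matrix)
    augmented = [matrix[row] + [vector[row][0]] for row in range(size)]
    row = col = 0
    while row < size and col < size:
        # Partial pivoting: bring the largest entry in this column up.
        pivot_row = max(range(row, size), key=lambda r: abs(augmented[r][col]))
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for lower in range(row + 1, size):
            ratio = augmented[lower][col] / augmented[row][col]
            for c in range(col, size + 1):
                augmented[lower][c] -= augmented[row][c] * ratio
        row += 1
        col += 1
    # Back substitution from the last pivot upward.
    x = [0.0] * size
    for r in range(size - 1, -1, -1):
        known = sum(augmented[r][c] * x[c] for c in range(r + 1, size))
        x[r] = (augmented[r][size] - known) / augmented[r][r]
    # Round to suppress float noise like 2.000000000000004.
    return [[round(value, 10)] for value in x]

print(solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]]))  # -> [[1.0], [3.0]]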
'''simple docstring''' def _a( UpperCamelCase__ : int = 1_0**1_2 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =1 SCREAMING_SNAKE_CASE__ : str =0 SCREAMING_SNAKE_CASE__ : int =1 SCREAMING_SNAKE_CASE__ : Tuple =1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F'''{solution() = }''')
708
'''simple docstring''' def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point] if current_item == item: return point else: if point < left: SCREAMING_SNAKE_CASE__ : Union[str, Any] =left SCREAMING_SNAKE_CASE__ : Optional[Any] =point elif point > right: SCREAMING_SNAKE_CASE__ : Optional[int] =right SCREAMING_SNAKE_CASE__ : Tuple =point else: if item < current_item: SCREAMING_SNAKE_CASE__ : str =point - 1 else: SCREAMING_SNAKE_CASE__ : Tuple =point + 1 return None def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) elif point > right: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 ) else: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ ) def _a( UpperCamelCase__ : Dict ): '''simple docstring''' if collection != sorted(UpperCamelCase__ ): raise ValueError('''Collection must be ascending sorted''' ) return True if __name__ == "__main__": import sys a_ = 0 if debug == 1: a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') a_ = 6_7 a_ = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print('Not found')
665
0
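The style_context above is an interpolation search with obfuscated names; the following sketch restates the same loop with descriptive identifiers (the names and test values are illustrative, and an ascending list of ints is assumed). The point < left / point > right branches mirror the original's window-clamping and keep the probe from oscillating forever:

def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # A constant slice would make the interpolation divide by zero.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        # Linear-interpolation guess for the item's index.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):
            return None
        current = sorted_collection[point]
        if current == item:
            return point
        if point < left:        # guess fell below the current window
            right, left = left, point
        elif point > right:     # guess fell above the current window
            left, right = right, point
        elif item < current:
            right = point - 1
        else:
            left = point + 1
    return None

assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45) == 3
assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 67) is None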
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) def _a( UpperCamelCase__ : List[Any] ): '''simple docstring''' if isinstance(UpperCamelCase__, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCamelCase__, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCamelCase__ ): return [[videos]] raise ValueError(f"Could not make batched video from {videos}" ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = ["""pixel_values"""] def __init__( self : int , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : bool = True , __lowercase : Union[int, float] = 1 / 2_55 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Optional[int] , ) -> None: super().__init__(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =size if size is not None else {'''shortest_edge''': 2_56} SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase ) SCREAMING_SNAKE_CASE__ : str =crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , param_name='''crop_size''' ) SCREAMING_SNAKE_CASE__ : int =do_resize SCREAMING_SNAKE_CASE__ : Tuple =size SCREAMING_SNAKE_CASE__ : int =do_center_crop SCREAMING_SNAKE_CASE__ : Dict =crop_size SCREAMING_SNAKE_CASE__ : Tuple =resample SCREAMING_SNAKE_CASE__ : List[Any] =do_rescale SCREAMING_SNAKE_CASE__ : Optional[Any] =rescale_factor SCREAMING_SNAKE_CASE__ : Optional[Any] =offset SCREAMING_SNAKE_CASE__ : Tuple =do_normalize SCREAMING_SNAKE_CASE__ : List[str] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE__ : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : PILImageResampling = PILImageResampling.BILINEAR , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : str , ) -> np.ndarray: SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_size_dict(__lowercase , default_to_square=__lowercase ) if "shortest_edge" in size: SCREAMING_SNAKE_CASE__ : Optional[int] =get_resize_output_image_size(__lowercase , size['''shortest_edge'''] , default_to_square=__lowercase ) elif "height" in size and "width" in size: SCREAMING_SNAKE_CASE__ : Any =(size['''height'''], size['''width''']) else: raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ) -> np.ndarray: SCREAMING_SNAKE_CASE__ : Any =get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase ) def __magic_name__ ( self : Tuple , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : bool = True , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =image.astype(np.floataa ) if offset: SCREAMING_SNAKE_CASE__ : int =image - (scale / 2) return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase ) def __magic_name__ ( self : Optional[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Any , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase ) def __magic_name__ ( self : Dict , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE__ : List[str] =to_numpy_array(__lowercase ) if do_resize: SCREAMING_SNAKE_CASE__ : str =self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) if do_center_crop: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.center_crop(__lowercase , size=__lowercase ) if do_rescale: SCREAMING_SNAKE_CASE__ : List[str] =self.rescale(image=__lowercase , scale=__lowercase , offset=__lowercase ) if do_normalize: SCREAMING_SNAKE_CASE__ : int =self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =to_channel_dimension_format(__lowercase , __lowercase ) return image def __magic_name__ ( self : Any , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : Dict , ) -> PIL.Image.Image: SCREAMING_SNAKE_CASE__ : Union[str, Any] =do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE__ : List[str] =resample if resample is not None else self.resample SCREAMING_SNAKE_CASE__ : str =do_center_crop if do_center_crop is not None else self.do_center_crop SCREAMING_SNAKE_CASE__ : Dict =do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Optional[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : Any =offset if offset is not None else self.offset SCREAMING_SNAKE_CASE__ : List[Any] =do_normalize if do_normalize is not None else self.do_normalize SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE__ : Optional[Any] =image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE__ : Optional[int] =size if size is not None else self.size SCREAMING_SNAKE_CASE__ : str =get_size_dict(__lowercase , default_to_square=__lowercase ) SCREAMING_SNAKE_CASE__ : str =crop_size if crop_size is not None else self.crop_size SCREAMING_SNAKE_CASE__ : Optional[int] =get_size_dict(__lowercase , param_name='''crop_size''' ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =make_batched(__lowercase ) SCREAMING_SNAKE_CASE__ : int =[ [ self._preprocess_image( image=__lowercase , do_resize=__lowercase , size=__lowercase , resample=__lowercase , do_center_crop=__lowercase , crop_size=__lowercase , do_rescale=__lowercase , rescale_factor=__lowercase , offset=__lowercase , do_normalize=__lowercase , image_mean=__lowercase , image_std=__lowercase , data_format=__lowercase , ) for img in video ] for video in videos ] SCREAMING_SNAKE_CASE__ : Tuple ={'''pixel_values''': videos} return BatchFeature(data=__lowercase , tensor_type=__lowercase )
709
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @require_tf def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, 
{'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @slow @require_torch def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : str =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
665
0
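The video image processor above normalizes its input with a make_batched helper before preprocessing. A standalone sketch of that normalization, with the library's is_valid_image stubbed by a plain ndarray check (an assumption for this sketch): any of a single frame, a single video, or a batch of videos becomes a list of videos, where a video is a list of frames.

import numpy as np

def is_valid_image(x) -> bool:  # stand-in for the library helper
    return isinstance(x, np.ndarray)

def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos               # already a batch of videos
    if isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]             # a single video -> batch of one
    if is_valid_image(videos):
        return [[videos]]           # a single frame -> one 1-frame video
    raise ValueError(f"Could not make batched video from {videos}")

frame = np.zeros((224, 224, 3), dtype=np.uint8)
assert len(make_batched(frame)) == 1               # [[frame]]
assert len(make_batched([frame, frame])) == 1      # one 2-frame video
assert len(make_batched([[frame], [frame]])) == 2  # two 1-frame videos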
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
710
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __magic_name__ ( self : Optional[int] ) -> str: import torch SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : str =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 
51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __magic_name__ ( self : Any ) -> List[str]: import torch SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
665
0
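The Kandinsky pipeline test above checks generated images by comparing a small corner slice against a frozen reference with a loose tolerance. A minimal sketch of that pattern (the helper name, tolerance, and values here are placeholders, not from the source):

import numpy as np

def check_image_slice(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> None:
    # Take the bottom-right 3x3 patch of the last channel of the first image
    # (batch, height, width, channels layout assumed).
    image_slice = image[0, -3:, -3:, -1]
    max_diff = np.abs(image_slice.flatten() - expected_slice).max()
    assert max_diff < tol, f"expected {expected_slice}, got {image_slice.flatten()}"

fake_image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)
check_image_slice(fake_image, np.full(9, 0.5, dtype=np.float32))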
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' if isinstance(UpperCamelCase__, collections.abc.Iterable ): return x return (x, x) @require_tf class __SCREAMING_SNAKE_CASE : def __magic_name__ ( self : Tuple , __lowercase : Tuple , __lowercase : Tuple ) -> Optional[Any]: pass def __magic_name__ ( self : List[Any] ) -> Any: pass def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: pass def __magic_name__ ( self : int , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Any , __lowercase : Dict , __lowercase : Optional[int]=None , **__lowercase : List[str] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =VisionTextDualEncoderConfig.from_vision_text_configs(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =TFVisionTextDualEncoderModel(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def __magic_name__ ( self : int , __lowercase : str , __lowercase : int , __lowercase : int , __lowercase : int , __lowercase : str=None , **__lowercase : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Tuple =self.get_vision_text_model(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Any , __lowercase : int , __lowercase : Tuple , __lowercase : Union[str, Any]=None , **__lowercase : Optional[Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : int =self.get_vision_text_model(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] ={'''vision_model''': vision_model, '''text_model''': text_model} SCREAMING_SNAKE_CASE__ : Optional[Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) 
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __magic_name__ ( self : str , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : Any , __lowercase : Any , __lowercase : str=None , **__lowercase : str ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_vision_text_model(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : str =TFVisionTextDualEncoderModel.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =model(input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =after_output[0].numpy() SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowercase , 1e-5 ) def __magic_name__ ( self : List[Any] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : List[Any]=None , **__lowercase : Tuple ) -> int: SCREAMING_SNAKE_CASE__ : List[Any] =self.get_vision_text_model(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =model( input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =output.vision_model_output.attentions self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE__ : int =to_atuple(vision_model.config.image_size ) SCREAMING_SNAKE_CASE__ : Dict =to_atuple(vision_model.config.patch_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =output.text_model_output.attentions self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __magic_name__ ( self : int , __lowercase : np.ndarray , __lowercase : np.ndarray , __lowercase : float ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =np.abs((a - b) ).max() self.assertLessEqual(__lowercase , __lowercase , F"Difference between torch and flax is {diff} (>= {tol})." 
) def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**__lowercase ) def __magic_name__ ( self : Any ) -> str: SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__lowercase ) def __magic_name__ ( self : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[str] =self.prepare_config_and_inputs() self.check_save_load(**__lowercase ) def __magic_name__ ( self : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[Any] =self.get_pretrained_model_and_inputs() SCREAMING_SNAKE_CASE__ : List[Any] =model_a(**__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : str =TFVisionTextDualEncoderModel.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : int =model_a(**__lowercase ) SCREAMING_SNAKE_CASE__ : str =after_outputs[0].numpy() SCREAMING_SNAKE_CASE__ : Optional[int] =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__lowercase , 1e-5 ) @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''' ) SCREAMING_SNAKE_CASE__ : List[str] =13 SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] =random_attention_mask([batch_size, 4] ) SCREAMING_SNAKE_CASE__ : Dict ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] =TFViTModel(__lowercase , name='''vision_model''' ) SCREAMING_SNAKE_CASE__ : Dict =TFBertModel(__lowercase , name='''text_model''' ) return vision_model, text_model def __magic_name__ ( self : Tuple ) -> Dict: SCREAMING_SNAKE_CASE__ : int =TFViTModelTester(self ) SCREAMING_SNAKE_CASE__ : Any =TFBertModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[int] =vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Any =bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[Any] =vision_config_and_inputs ( SCREAMING_SNAKE_CASE__ ) : Tuple =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): def __magic_name__ ( self 
: List[str] ) -> int: # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. SCREAMING_SNAKE_CASE__ : List[str] =TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''' ) SCREAMING_SNAKE_CASE__ : Any =13 SCREAMING_SNAKE_CASE__ : Tuple =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] =random_attention_mask([batch_size, 4] ) SCREAMING_SNAKE_CASE__ : Optional[int] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : str=None , **__lowercase : Tuple ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_vision_text_model(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =TFVisionTextDualEncoderModel(vision_model=__lowercase , text_model=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =model( input_ids=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , output_attentions=__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =output.vision_model_output.attentions self.assertEqual(len(__lowercase ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) SCREAMING_SNAKE_CASE__ : Union[str, Any] =to_atuple(vision_model.config.image_size ) SCREAMING_SNAKE_CASE__ : int =to_atuple(vision_model.config.patch_size ) SCREAMING_SNAKE_CASE__ : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE__ : Tuple =num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) SCREAMING_SNAKE_CASE__ : Any =output.text_model_output.attentions self.assertEqual(len(__lowercase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __magic_name__ ( self : str , __lowercase : List[str] , __lowercase : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Dict =TFDeiTModel(__lowercase , name='''vision_model''' ) SCREAMING_SNAKE_CASE__ : List[Any] =TFRobertaModel(__lowercase , name='''text_model''' ) return vision_model, text_model def __magic_name__ ( self : Dict ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[Any] =TFDeiTModelTester(self ) SCREAMING_SNAKE_CASE__ : str =TFRobertaModelTester(self ) SCREAMING_SNAKE_CASE__ : str =vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Any =bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : int =vision_config_and_inputs ( SCREAMING_SNAKE_CASE__ ) : Dict =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): def __magic_name__ ( self : str ) -> List[Any]: 
SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFVisionTextDualEncoderModel.from_vision_text_pretrained( '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =13 SCREAMING_SNAKE_CASE__ : Any =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) SCREAMING_SNAKE_CASE__ : Dict =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) SCREAMING_SNAKE_CASE__ : Dict =random_attention_mask([batch_size, 4] ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : str ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =TFCLIPVisionModel(__lowercase , name='''vision_model''' ) SCREAMING_SNAKE_CASE__ : Any =TFBertModel(__lowercase , name='''text_model''' ) return vision_model, text_model def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =TFCLIPVisionModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[Any] =TFBertModelTester(self ) SCREAMING_SNAKE_CASE__ : List[str] =clip_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[str] =bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Dict =vision_config_and_inputs ( SCREAMING_SNAKE_CASE__ ) : Any =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __magic_name__ ( self : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[Any] =TFVisionTextDualEncoderModel.from_pretrained( '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=__lowercase ) SCREAMING_SNAKE_CASE__ : str =VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__lowercase , padding=__lowercase , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : Dict =model(**__lowercase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __lowercase , atol=1e-3 ) )
711
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_neox""" def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : Dict =hidden_act SCREAMING_SNAKE_CASE__ : str =rotary_pct SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout SCREAMING_SNAKE_CASE__ : str =classifier_dropout SCREAMING_SNAKE_CASE__ : Any =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : Any =use_cache SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisible by the number of attention heads! Make sure to update them!''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' F"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
665
0
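The GPTNeoX config snippet above enforces a contract on rope_scaling; here is a standalone sketch of that contract as a plain function (field names follow the snippet's .get('type') / .get('factor') calls; this re-check is illustrative, not an import of the library validator):

def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return  # feature disabled
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with two fields, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # ok
validate_rope_scaling(None)                                # ok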
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging a_ = logging.get_logger(__name__) # TODO: upload to AWS a_ = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """retribert""" def __init__( self : Any , __lowercase : Dict=3_05_22 , __lowercase : Tuple=7_68 , __lowercase : Any=8 , __lowercase : Dict=12 , __lowercase : Any=30_72 , __lowercase : Optional[int]="gelu" , __lowercase : List[Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Optional[Any]=5_12 , __lowercase : List[Any]=2 , __lowercase : Any=0.02 , __lowercase : str=1e-12 , __lowercase : List[str]=True , __lowercase : Optional[Any]=1_28 , __lowercase : Optional[int]=0 , **__lowercase : Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : str =vocab_size SCREAMING_SNAKE_CASE__ : Dict =hidden_size SCREAMING_SNAKE_CASE__ : int =num_hidden_layers SCREAMING_SNAKE_CASE__ : List[Any] =num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_act SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[int] =type_vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : int =share_encoders SCREAMING_SNAKE_CASE__ : Union[str, Any] =projection_dim
712
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] float tensors (NCHW) -> uint8-range numpy arrays (NHWC)
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
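# --- Added usage sketch (not part of the original cell) ---
# Hedged round trip for the watermarker; assumes the invisible-watermark package
# (imwatermark) is installed. Class and method names are my restoration of the
# anonymized diffusers original.
import torch

watermarker = StableDiffusionXLWatermarker()
images = torch.rand(2, 3, 512, 512) * 2 - 1  # decoded images in [-1, 1], NCHW
marked = watermarker.apply_watermark(images)
print(marked.shape)  # torch.Size([2, 3, 512, 512])
# tensors narrower than 256 px pass through unchanged via the early return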
713
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
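# --- Added worked example (not part of the original cell) ---
# Function and variable names above are my reconstruction of the anonymized cell.
# This 3x3 system is strictly diagonally dominant, so the iteration converges.
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=50))
# approaches the exact solution of the system after enough sweeps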
665
0
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[int]=12 , __lowercase : Any=7 , __lowercase : int=True , __lowercase : Optional[int]=True , __lowercase : Optional[Any]=True , __lowercase : Union[str, Any]=99 , __lowercase : int=32 , __lowercase : Dict=32 , __lowercase : int=2 , __lowercase : List[Any]=4 , __lowercase : Any=37 , __lowercase : Union[str, Any]=0.1 , __lowercase : str=0.1 , __lowercase : Dict=5_12 , __lowercase : Optional[Any]=0.02 , __lowercase : str=0 , __lowercase : Optional[int]=None , ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : str =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : str =seq_length SCREAMING_SNAKE_CASE__ : str =is_training SCREAMING_SNAKE_CASE__ : int =use_input_mask SCREAMING_SNAKE_CASE__ : Tuple =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : Optional[int] =projection_dim SCREAMING_SNAKE_CASE__ : Optional[int] =num_hidden_layers SCREAMING_SNAKE_CASE__ : List[str] =num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[int] =intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] =dropout SCREAMING_SNAKE_CASE__ : List[str] =attention_dropout SCREAMING_SNAKE_CASE__ : Dict =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range SCREAMING_SNAKE_CASE__ : Dict =scope SCREAMING_SNAKE_CASE__ : List[str] =bos_token_id def __magic_name__ ( self : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : str =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : str =random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: SCREAMING_SNAKE_CASE__ : List[Any] =input_mask.numpy() SCREAMING_SNAKE_CASE__ : List[Any] =input_mask.shape SCREAMING_SNAKE_CASE__ : List[Any] =np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =1 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 SCREAMING_SNAKE_CASE__ : str =self.get_config() return config, input_ids, tf.convert_to_tensor(__lowercase ) def __magic_name__ ( self : List[str] ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def __magic_name__ ( self : int , __lowercase : Any , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =TFBlipTextModel(config=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict 
=model(__lowercase , attention_mask=__lowercase , training=__lowercase ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase , training=__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Any ) -> str: SCREAMING_SNAKE_CASE__ : List[Any] =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : Optional[int] =config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = (TFBlipTextModel,) if is_tf_available() else () snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : Tuple ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[int] =BlipTextModelTester(self ) SCREAMING_SNAKE_CASE__ : Optional[Any] =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : List[str] ) -> Union[str, Any]: self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Dict: pass def __magic_name__ ( self : Tuple ) -> List[str]: pass @unittest.skip(reason='''Blip does not use inputs_embeds''' ) def __magic_name__ ( self : Any ) -> Optional[int]: pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def __magic_name__ ( self : List[str] ) -> List[Any]: pass @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' ) def __magic_name__ ( self : Union[str, Any] ) -> int: pass @slow def __magic_name__ ( self : str ) -> str: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[Any] =TFBlipTextModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : int , __lowercase : List[Any]=True ) -> Any: super().test_pt_tf_model_equivalence(allow_missing_keys=__lowercase )
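# --- Added smoke test (not part of the original cell) ---
# Standalone counterpart to the test harness above, reusing the tester's tiny
# dimensions. Hedged: assumes TensorFlow plus a transformers version exporting
# TFBlipTextModel.
import tensorflow as tf
from transformers import BlipTextConfig, TFBlipTextModel

config = BlipTextConfig(
    vocab_size=99, hidden_size=32, projection_dim=32,
    num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
)
model = TFBlipTextModel(config)
outputs = model(tf.constant([[2, 5, 9, 1]]), training=False)
print(outputs.last_hidden_state.shape)  # (1, 4, 32)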
714
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' with open(UpperCamelCase__ ) as metadata_file: SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module'''] # Load the entity vocab file SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer''' with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0] SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name] SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self." 
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias'''] SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key] else: SCREAMING_SNAKE_CASE__ : Any =state_dict[key] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' ) SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9) SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" 
{expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.''' SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist() SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE__ : Dict =[ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]'''] SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Optional[int] ={} for entry in data: SCREAMING_SNAKE_CASE__ : Tuple =entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE__ : str =entity_id break SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}" SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id return new_mapping if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
665
0
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'spiece.model'} a_ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } a_ = { 'google/bigbird-roberta-base': 4_0_9_6, 'google/bigbird-roberta-large': 4_0_9_6, 'google/bigbird-base-trivia-itc': 4_0_9_6, } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ["""input_ids""", """attention_mask"""] snake_case_ = [] def __init__( self : Any , __lowercase : Union[str, Any] , __lowercase : Tuple="<unk>" , __lowercase : Optional[Any]="<s>" , __lowercase : List[str]="</s>" , __lowercase : Dict="<pad>" , __lowercase : Dict="[SEP]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Dict="[CLS]" , __lowercase : Optional[Dict[str, Any]] = None , **__lowercase : str , ) -> None: SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token SCREAMING_SNAKE_CASE__ : List[str] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token SCREAMING_SNAKE_CASE__ : Optional[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE__ : str =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token SCREAMING_SNAKE_CASE__ : List[str] ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , sep_token=__lowercase , mask_token=__lowercase , cls_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , ) SCREAMING_SNAKE_CASE__ : Dict =vocab_file SCREAMING_SNAKE_CASE__ : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowercase ) @property def __magic_name__ ( self : Union[str, Any] ) -> List[str]: return self.sp_model.get_piece_size() def __magic_name__ ( self : Dict ) -> Dict: SCREAMING_SNAKE_CASE__ : Tuple ={self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Tuple =self.__dict__.copy() SCREAMING_SNAKE_CASE__ : Dict =None return state def __setstate__( self : str , __lowercase : Any ) -> Any: SCREAMING_SNAKE_CASE__ : Tuple =d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): SCREAMING_SNAKE_CASE__ : Optional[int] ={} SCREAMING_SNAKE_CASE__ : List[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __magic_name__ ( self : Optional[int] , __lowercase : str ) -> List[str]: return self.sp_model.encode(__lowercase , out_type=__lowercase ) def __magic_name__ ( self : int , __lowercase : Any ) -> List[str]: return self.sp_model.piece_to_id(__lowercase ) def __magic_name__ ( self : Dict , __lowercase : int ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =self.sp_model.IdToPiece(__lowercase ) return token def __magic_name__ ( self : List[str] , __lowercase : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Tuple =[] SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''''' SCREAMING_SNAKE_CASE__ : Optional[int] =False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__lowercase ) + token SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : Tuple =[] else: current_sub_tokens.append(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =False out_string += self.sp_model.decode(__lowercase ) return out_string.strip() def __magic_name__ ( self : List[str] , __lowercase : List[int] , __lowercase : bool = False , __lowercase : bool = None , __lowercase : bool = True , **__lowercase : Tuple , ) -> str: SCREAMING_SNAKE_CASE__ : Dict =kwargs.pop('''use_source_tokenizer''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.convert_ids_to_tokens(__lowercase , skip_special_tokens=__lowercase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 SCREAMING_SNAKE_CASE__ : List[str] =[] SCREAMING_SNAKE_CASE__ : List[Any] =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowercase ) ) SCREAMING_SNAKE_CASE__ : str =[] sub_texts.append(__lowercase ) else: current_sub_text.append(__lowercase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__lowercase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: SCREAMING_SNAKE_CASE__ : str =re.sub(r''' (\[(MASK|SEP)\])''' , r'''\1''' , ''' '''.join(__lowercase ) ) else: SCREAMING_SNAKE_CASE__ : List[str] =''''''.join(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: SCREAMING_SNAKE_CASE__ : int =self.clean_up_tokenization(__lowercase ) return clean_text else: return text def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowercase ) elif not os.path.isfile(self.vocab_file ): with open(__lowercase , '''wb''' ) as fi: SCREAMING_SNAKE_CASE__ : Optional[int] =self.sp_model.serialized_model_proto() fi.write(__lowercase ) return (out_vocab_file,) def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.cls_token_id] SCREAMING_SNAKE_CASE__ : int =[self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def __magic_name__ ( self : Any , __lowercase : List[int] , __lowercase : Optional[List[int]] = None , __lowercase : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) + [1] def __magic_name__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : str =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
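# --- Added usage sketch (not part of the original cell) ---
# The cell defines the SentencePiece-backed BigBird tokenizer; this fetches the
# real spiece.model from the Hub on first call (network access assumed).
from transformers import BigBirdTokenizer

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
encoded = tokenizer("Sparse attention scales to long documents.")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"])[:4])
# exact pieces depend on the vocabulary, e.g. ['[CLS]', '▁Spar', 'se', '▁attention']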
715
def abbr(a: str, b: str) -> bool:
    """
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
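# --- Added explanation (not part of the original cell) ---
# dp[i][j] is True when the first i characters of `a` can produce the first j
# characters of `b`: a lowercase a[i] may be capitalized (advance both indices)
# or dropped (advance i only). The name `abbr` is restored from the anonymized def.
print(abbr("daBcd", "ABC"))  # True: capitalize 'a' and 'c', drop both 'd's
print(abbr("dBcd", "ABC"))   # False: no 'a' is available to produce the leading 'A'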
665
0
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
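# --- Added invocation sketch (not part of the original cell) ---
# Hypothetical direct call, equivalent to the CLI entry point above; all paths
# are placeholders for a locally downloaded ALBERT TF checkpoint.
convert_tf_checkpoint_to_pytorch(
    "albert_base/model.ckpt-best",
    "albert_base/albert_config.json",
    "albert_base/pytorch_model.bin",
)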
716
'''simple docstring''' import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str]=13 , __lowercase : int=7 , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : List[Any]=True , __lowercase : str=True , __lowercase : Tuple=99 , __lowercase : Union[str, Any]=64 , __lowercase : Tuple=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[int]=37 , __lowercase : int="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : List[Any]=0.1 , __lowercase : List[Any]=5_12 , __lowercase : int=16 , __lowercase : Optional[int]=2 , __lowercase : Tuple=0.02 , __lowercase : List[str]=3 , __lowercase : List[str]=4 , __lowercase : List[str]=None , ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =parent SCREAMING_SNAKE_CASE__ : Any =batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =seq_length SCREAMING_SNAKE_CASE__ : Dict =is_training SCREAMING_SNAKE_CASE__ : List[Any] =use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_token_type_ids SCREAMING_SNAKE_CASE__ : List[Any] =use_labels SCREAMING_SNAKE_CASE__ : int =vocab_size SCREAMING_SNAKE_CASE__ : str =hidden_size SCREAMING_SNAKE_CASE__ : Any =embedding_size SCREAMING_SNAKE_CASE__ : Tuple =num_hidden_layers SCREAMING_SNAKE_CASE__ : str =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : str =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] =type_vocab_size SCREAMING_SNAKE_CASE__ : Any =type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] =initializer_range SCREAMING_SNAKE_CASE__ : str =num_labels SCREAMING_SNAKE_CASE__ : List[str] =num_choices SCREAMING_SNAKE_CASE__ : List[str] =scope def __magic_name__ ( self : str ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : int =None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =None SCREAMING_SNAKE_CASE__ : Optional[Any] =None SCREAMING_SNAKE_CASE__ : Optional[int] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : List[Any] =ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__ ( self : List[str] ) -> Any: return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Any , __lowercase : Tuple , __lowercase : Any , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : Dict , __lowercase : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model(__lowercase , token_type_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__ ( self : Dict , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : Any , __lowercase : Optional[int] , __lowercase : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =MegatronBertForMaskedLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Tuple =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : List[str] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : str , __lowercase : Any , __lowercase : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForCausalLM(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =MegatronBertForNextSentencePrediction(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : List[str] , __lowercase : Optional[int] , __lowercase : str , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , 
__lowercase : str , __lowercase : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =MegatronBertForPreTraining(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __magic_name__ ( self : Optional[int] , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Union[str, Any] , __lowercase : int , __lowercase : Any , __lowercase : Any , __lowercase : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =MegatronBertForQuestionAnswering(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Dict =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__ ( self : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : List[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : List[str] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.num_labels SCREAMING_SNAKE_CASE__ : Dict =MegatronBertForSequenceClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : int ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple =self.num_labels SCREAMING_SNAKE_CASE__ : int =MegatronBertForTokenClassification(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[str] =model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self : Dict , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Any , __lowercase : List[Any] , __lowercase : int , __lowercase : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =self.num_choices SCREAMING_SNAKE_CASE__ : List[str] =MegatronBertForMultipleChoice(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[int] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Tuple =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ : Optional[Any] =model( __lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__ ( self : str ) -> Any: 
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) snake_case_ = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ = True # test_resize_embeddings = False snake_case_ = False def __magic_name__ ( self : List[Any] , __lowercase : List[str] , __lowercase : Tuple , __lowercase : Tuple=False ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Optional[Any] =super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : List[str] =torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =MegatronBertModelTester(self ) SCREAMING_SNAKE_CASE__ : Tuple =ConfigTester(self , config_class=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str ) -> Dict: self.config_tester.run_common_tests() def __magic_name__ ( self : Tuple ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Any: SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowercase ) def __magic_name__ ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowercase ) def __magic_name__ ( self : Dict ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowercase ) def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowercase ) def _a( UpperCamelCase__ : List[str] ): '''simple docstring''' return torch.tensor( UpperCamelCase__, dtype=torch.long, device=UpperCamelCase__, ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__ ( self : Any ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any ='''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: SCREAMING_SNAKE_CASE__ : Optional[int] =os.path.join(os.environ['''MYDIR'''] , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =MegatronBertModel.from_pretrained(__lowercase ) model.to(__lowercase ) model.half() SCREAMING_SNAKE_CASE__ : Dict =_long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase )[0] SCREAMING_SNAKE_CASE__ : Dict =torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : str =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): SCREAMING_SNAKE_CASE__ : List[Any] =output[0, ii, jj] SCREAMING_SNAKE_CASE__ : Tuple =expected[3 * ii + jj] SCREAMING_SNAKE_CASE__ : List[str] ='''ii={} jj={} a={} b={}'''.format(__lowercase , __lowercase , __lowercase , __lowercase ) self.assertTrue(math.isclose(__lowercase , __lowercase , rel_tol=__lowercase , abs_tol=__lowercase ) , msg=__lowercase )
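# --- Added smoke test (not part of the original cell) ---
# Tiny standalone counterpart to the harness above: a randomly initialized model
# with the tester's small dimensions, since the slow 345M-checkpoint test is
# skipped as unavailable. Assumes torch and transformers are installed.
import torch
from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = MegatronBertModel(config).eval()
with torch.no_grad():
    out = model(torch.tensor([[2, 5, 9, 1]]))
print(out.last_hidden_state.shape)  # torch.Size([1, 4, 64])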
665
0
def count_inversions_bf(arr):
    # O(n^2) brute force: count every out-of-order pair directly
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    # O(n log n) divide and conquer; returns (sorted copy, inversion count)
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
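# --- Added quick check (not part of the original cell) ---
# Both counters on a fresh list; function names are restored from the anonymized defs.
arr = [3, 1, 2]
print(count_inversions_bf(arr))  # 2: the pairs (3, 1) and (3, 2)
sorted_arr, n_inv = count_inversions_recursive(arr)
print(sorted_arr, n_inv)  # [1, 2, 3] 2 -- the recursion also merge-sorts its input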
717
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    # class and method names restored approximately; the anonymized source kept
    # only the bodies
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
665
0
import logging
import os
from typing import List, TextIO, Union

from conllu import parse_incr

from utils_ner import InputExample, Split, TokenClassificationTask


logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL-2003 dataset, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
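# --- Added usage sketch (not part of the original cell) ---
# Assumes a CoNLL-style folder containing train.txt; "data_dir" is a placeholder,
# and utils_ner ships alongside this file in the examples folder.
task = NER()
examples = task.read_examples_from_file("data_dir", "train")  # reads data_dir/train.txt
print(len(examples), examples[0].words[:3], examples[0].labels[:3])
print(task.get_labels(None))  # falls back to the built-in CoNLL-2003 NER label set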
718
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = ShapEImgaImgPipeline snake_case_ = ["""image"""] snake_case_ = ["""image"""] snake_case_ = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[Any] ) -> List[Any]: return 32 @property def __magic_name__ ( self : List[str] ) -> Optional[int]: return 32 @property def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: return self.time_input_dim * 4 @property def __magic_name__ ( self : Dict ) -> Union[str, Any]: return 8 @property def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Dict =CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) SCREAMING_SNAKE_CASE__ : str =CLIPVisionModel(__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : int =CLIPImageProcessor( crop_size=2_24 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __magic_name__ ( self : List[str] ) -> Dict: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : str ={ '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } SCREAMING_SNAKE_CASE__ : str =PriorTransformer(**__lowercase ) return model @property def __magic_name__ ( self : Tuple ) -> List[str]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } SCREAMING_SNAKE_CASE__ : str =ShapERenderer(**__lowercase ) return model def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.dummy_prior SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_encoder SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_image_processor SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_renderer SCREAMING_SNAKE_CASE__ : int =HeunDiscreteScheduler( beta_schedule='''exp''' , 
num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=__lowercase , clip_sample=__lowercase , clip_sample_range=1.0 , ) SCREAMING_SNAKE_CASE__ : Any ={ '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __magic_name__ ( self : Any , __lowercase : List[str] , __lowercase : Any=0 ) -> Any: SCREAMING_SNAKE_CASE__ : int =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : Any ={ '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : List[str] ) -> str: SCREAMING_SNAKE_CASE__ : int ='''cpu''' SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : str =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Any =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] =np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __magic_name__ ( self : List[Any] ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __magic_name__ ( self : Optional[int] ) -> str: SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch_device == '''cpu''' SCREAMING_SNAKE_CASE__ : Optional[Any] =True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__lowercase , relax_max_difference=__lowercase , ) def __magic_name__ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Any =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =1 SCREAMING_SNAKE_CASE__ : List[str] =2 SCREAMING_SNAKE_CASE__ : Dict =self.get_dummy_inputs(__lowercase ) for key in inputs.keys(): if key in self.batch_params: SCREAMING_SNAKE_CASE__ : Tuple =batch_size * [inputs[key]] SCREAMING_SNAKE_CASE__ : List[Any] =pipe(**__lowercase , num_images_per_prompt=__lowercase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Optional[Any] ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : List[str] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) SCREAMING_SNAKE_CASE__ : Dict =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) SCREAMING_SNAKE_CASE__ : List[Any] 
=ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) SCREAMING_SNAKE_CASE__ : Tuple =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Tuple =pipe( __lowercase , generator=__lowercase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
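The get_dummy_inputs helper above branches on the device because torch.Generator has historically not supported the "mps" backend. A small standalone sketch of that pattern (the helper name is mine, not from the test):

import torch

def make_seeded_generator(device, seed: int = 0) -> torch.Generator:
    # On "mps", fall back to the global CPU generator, as the test does;
    # elsewhere, create a device-local generator with a fixed seed.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)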
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): a_ = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right a_ = 1_2_8_0_2_2 a_ = 1_2_8_0_2_8 @require_sentencepiece class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = MaMaaaTokenizer snake_case_ = False snake_case_ = False snake_case_ = True def __magic_name__ ( self : Dict ) -> Union[str, Any]: super().setUp() SCREAMING_SNAKE_CASE__ : Dict =['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] SCREAMING_SNAKE_CASE__ : Any =dict(zip(__lowercase , range(len(__lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ : int =Path(self.tmpdirname ) save_json(__lowercase , save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__lowercase , save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) SCREAMING_SNAKE_CASE__ : List[str] =MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Any , **__lowercase : Dict ) -> int: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def __magic_name__ ( self : Tuple , __lowercase : Any ) -> Any: return ( "This is a test", "This is a test", ) def __magic_name__ ( self : Optional[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Tuple ='''</s>''' SCREAMING_SNAKE_CASE__ : Any =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __magic_name__ ( self : List[Any] ) -> str: SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : List[Any] =list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<s>''' ) self.assertEqual(len(__lowercase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('''Skip this test while all models are still to be uploaded.''' ) def __magic_name__ ( self : List[Any] ) -> Union[str, Any]: pass def __magic_name__ ( self : Tuple ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : List[str] =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [2, 3, 4, 5, 6] , ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(__lowercase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_string(__lowercase ) self.assertEqual(__lowercase , '''This is a test''' ) @slow def __magic_name__ ( self : str ) -> List[Any]: # fmt: off SCREAMING_SNAKE_CASE__ : str ={'''input_ids''': 
[[12_80_22, 11_01_08, 3_97, 11, 3_82_72, 22_47, 12_48_11, 2_85, 1_81_05, 15_86, 2_07, 7, 3_95_34, 44_28, 3_97, 10_19, 1_81_05, 15_86, 2_07, 7, 4_13_37, 1_67_86, 2_41, 7, 2_02_14, 17, 12_56_90, 1_03_98, 7, 4_43_78, 5_80_69, 6_83_42, 77_98, 73_43, 11, 2_99, 3_33_10, 4, 1_58, 3_73_50, 9_40_77, 45_69, 2_99, 3_33_10, 90, 4, 5_28_40, 2_90, 4, 3_12_70, 1_12, 2_99, 6_82, 4, 5_28_40, 3_99_53, 1_40_79, 1_93, 5_25_19, 9_08_94, 1_78_94, 12_06_97, 11, 4_04_45, 5_51, 17, 10_19, 5_25_19, 9_08_94, 1_77_56, 9_63, 11, 4_04_45, 4_80, 17, 97_92, 11_20, 51_73, 13_93, 62_40, 1_67_86, 2_41, 12_09_96, 28, 12_45, 13_93, 11_82_40, 1_11_23, 10_19, 9_36_12, 26_91, 1_06_18, 9_80_58, 12_04_09, 19_28, 2_79, 4, 4_06_83, 3_67, 1_78, 2_07, 10_19, 1_03, 10_31_21, 5_06, 6_52_96, 5, 2], [12_80_22, 2_12_17, 3_67, 1_17, 12_54_50, 1_28, 7_19, 7, 73_08, 40, 9_36_12, 1_26_69, 11_16, 1_67_04, 71, 1_77_85, 36_99, 1_55_92, 35, 1_44, 95_84, 2_41, 1_19_43, 7_13, 9_50, 7_99, 22_47, 8_84_27, 1_50, 1_49, 11_88_13, 12_07_06, 10_19, 10_69_06, 8_15_18, 28, 12_24, 2_27_99, 3_97, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_80_22, 16_58, 12_33_11, 51_55, 55_78, 47_22, 2_79, 1_49_47, 23_66, 11_20, 11_97, 14, 13_48, 92_32, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = """facebook/m2m100_418M""" snake_case_ = [ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] snake_case_ = [ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off snake_case_ = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2] @classmethod def __magic_name__ ( cls : int ) -> Optional[Any]: 
SCREAMING_SNAKE_CASE__ : MaMaaaTokenizer =MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''' ) SCREAMING_SNAKE_CASE__ : Tuple =1 return cls def __magic_name__ ( self : Tuple ) -> Tuple: self.assertEqual(self.tokenizer.get_lang_id('''ar''' ) , 12_80_06 ) self.assertEqual(self.tokenizer.get_lang_id('''en''' ) , 12_80_22 ) self.assertEqual(self.tokenizer.get_lang_id('''ro''' ) , 12_80_76 ) self.assertEqual(self.tokenizer.get_lang_id('''mr''' ) , 12_80_63 ) def __magic_name__ ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : List[Any] =self.tokenizer.get_vocab() self.assertEqual(len(__lowercase ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['''<unk>'''] , 3 ) self.assertIn(self.tokenizer.get_lang_token('''en''' ) , __lowercase ) def __magic_name__ ( self : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] ='''en''' SCREAMING_SNAKE_CASE__ : List[str] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __lowercase ) def __magic_name__ ( self : str ) -> Dict: self.assertIn(__lowercase , self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE__ : Tuple =[FR_CODE, 53_64, 82, 86_42, 4, 2_94, 47, 8, 1_40_28, 1_36, 32_86, 97_06, 6, 9_07_97, 6, 14_40_12, 1_62, 8_81_28, 3_00_61, 5, 2] # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowercase ) self.assertEqual(__lowercase , __lowercase ) self.assertNotIn(self.tokenizer.eos_token , __lowercase ) def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[str] =tempfile.mkdtemp() SCREAMING_SNAKE_CASE__ : List[Any] =self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : int =MaMaaaTokenizer.from_pretrained(__lowercase ) self.assertDictEqual(new_tok.lang_token_to_id , __lowercase ) @require_torch def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] ='''en''' SCREAMING_SNAKE_CASE__ : Tuple ='''fr''' SCREAMING_SNAKE_CASE__ : Optional[Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowercase , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : Optional[int] =shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: SCREAMING_SNAKE_CASE__ : Dict =batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __magic_name__ ( self : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Tuple ='''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) SCREAMING_SNAKE_CASE__ : int ='''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __magic_name__ ( self : str ) -> List[str]: SCREAMING_SNAKE_CASE__ : List[str] ='''mr''' 
self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) SCREAMING_SNAKE_CASE__ : Dict ='''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __magic_name__ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''' ) self.assertEqual( nested_simplify(__lowercase ) , { # en_XX, A, test, EOS '''input_ids''': [[12_80_22, 58, 41_83, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 12_80_06, } , )
719
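For reference, the tokenizer round trip these tests exercise looks roughly like this; it is a sketch that assumes the real facebook/m2m100_418M checkpoint is available (upstream the class is named M2M100Tokenizer):

from transformers import MaMaaaTokenizer  # upstream name: M2M100Tokenizer

tokenizer = MaMaaaTokenizer.from_pretrained(
    "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
)
batch = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
# The target language is selected at generation time by forcing its
# language code as the first generated token:
forced_bos_token_id = tokenizer.get_lang_id("fr")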
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
665
0
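The attribute_map in the config above aliases the generic names (hidden_size, num_hidden_layers, ...) onto the GPT-2-style fields (n_embd, n_layer, ...), so downstream code can read either spelling. A sketch of the mechanism with transformers' PretrainedConfig (TinyConfig is a hypothetical name):

from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    # Same trick as above: map a generic attribute name to a legacy field name.
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd=768, **kwargs):
        self.n_embd = n_embd
        super().__init__(**kwargs)

config = TinyConfig()
assert config.hidden_size == config.n_embd == 768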
'''simple docstring'''
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    '''simple docstring'''
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    '''simple docstring'''
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    '''simple docstring'''
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    '''simple docstring'''
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
720
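As a quick numerical check of beta and gamma above: at half the speed of light, beta is 0.5 and the Lorentz factor is about 1.1547.

from math import sqrt

c = 299_792_458
v = 0.5 * c
beta = v / c                     # 0.5
gamma = 1 / sqrt(1 - beta ** 2)  # 1 / sqrt(0.75) ≈ 1.1547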
'''simple docstring'''
class __SCREAMING_SNAKE_CASE:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
0
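A short usage sketch of the max-Fenwick tree above (the class keeps the file's placeholder name; query treats the right bound as exclusive):

tree = __SCREAMING_SNAKE_CASE(5)  # placeholder class name from the file above
tree.update(0, 10)
tree.update(3, 7)
assert tree.query(0, 4) == 10  # max over indices [0, 4)
assert tree.query(1, 4) == 7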
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType a_ = logging.get_logger(__name__) a_ = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off a_ = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] a_ = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """whisper""" snake_case_ = ["""past_key_values"""] snake_case_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Any , __lowercase : Any=5_18_65 , __lowercase : Optional[Any]=80 , __lowercase : Union[str, Any]=6 , __lowercase : Optional[int]=4 , __lowercase : Optional[int]=6 , __lowercase : int=4 , __lowercase : Dict=15_36 , __lowercase : Any=15_36 , __lowercase : Dict=0.0 , __lowercase : Dict=0.0 , __lowercase : Optional[int]=5_02_57 , __lowercase : Optional[int]=True , __lowercase : List[str]=True , __lowercase : Optional[int]="gelu" , __lowercase : Dict=2_56 , __lowercase : Union[str, Any]=0.0 , __lowercase : Optional[int]=0.0 , __lowercase : Optional[Any]=0.0 , __lowercase : Any=0.02 , __lowercase : Optional[Any]=False , __lowercase : int=15_00 , __lowercase : Any=4_48 , __lowercase : int=5_02_56 , __lowercase : Dict=5_02_56 , __lowercase : Dict=5_02_56 , __lowercase : List[Any]=None , __lowercase : Tuple=[2_20, 5_02_56] , __lowercase : Dict=False , __lowercase : Dict=2_56 , __lowercase : List[Any]=False , __lowercase : Union[str, Any]=0.05 , __lowercase : List[Any]=10 , __lowercase : List[str]=2 , __lowercase : Dict=0.0 , __lowercase : int=10 , __lowercase : Any=0 , __lowercase : int=7 , **__lowercase : int , ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[Any] =vocab_size SCREAMING_SNAKE_CASE__ : int =num_mel_bins SCREAMING_SNAKE_CASE__ : Optional[int] 
=d_model SCREAMING_SNAKE_CASE__ : Optional[int] =encoder_layers SCREAMING_SNAKE_CASE__ : Optional[Any] =encoder_attention_heads SCREAMING_SNAKE_CASE__ : str =decoder_layers SCREAMING_SNAKE_CASE__ : Optional[Any] =decoder_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =decoder_ffn_dim SCREAMING_SNAKE_CASE__ : Optional[int] =encoder_ffn_dim SCREAMING_SNAKE_CASE__ : Union[str, Any] =dropout SCREAMING_SNAKE_CASE__ : Optional[int] =attention_dropout SCREAMING_SNAKE_CASE__ : str =activation_dropout SCREAMING_SNAKE_CASE__ : str =activation_function SCREAMING_SNAKE_CASE__ : Tuple =init_std SCREAMING_SNAKE_CASE__ : List[str] =encoder_layerdrop SCREAMING_SNAKE_CASE__ : int =decoder_layerdrop SCREAMING_SNAKE_CASE__ : List[Any] =use_cache SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoder_layers SCREAMING_SNAKE_CASE__ : List[str] =scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE__ : Any =max_source_positions SCREAMING_SNAKE_CASE__ : List[str] =max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. SCREAMING_SNAKE_CASE__ : List[Any] =classifier_proj_size SCREAMING_SNAKE_CASE__ : List[str] =use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 SCREAMING_SNAKE_CASE__ : List[str] =apply_spec_augment SCREAMING_SNAKE_CASE__ : str =mask_time_prob SCREAMING_SNAKE_CASE__ : List[str] =mask_time_length SCREAMING_SNAKE_CASE__ : int =mask_time_min_masks SCREAMING_SNAKE_CASE__ : Any =mask_feature_prob SCREAMING_SNAKE_CASE__ : List[str] =mask_feature_length SCREAMING_SNAKE_CASE__ : int =mask_feature_min_masks SCREAMING_SNAKE_CASE__ : Optional[int] =median_filter_width super().__init__( pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , suppress_tokens=__lowercase , begin_suppress_tokens=__lowercase , **__lowercase , ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @property def __magic_name__ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: SCREAMING_SNAKE_CASE__ : Optional[Any] =OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: SCREAMING_SNAKE_CASE__ : List[str] ={0: '''batch'''} else: SCREAMING_SNAKE_CASE__ : Union[str, Any] ={0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__lowercase , direction='''inputs''' ) return common_inputs def __magic_name__ ( self : int , __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __lowercase : int = -1 , __lowercase : int = -1 , __lowercase : bool = False , __lowercase : Optional["TensorType"] = None , __lowercase : int = 2_20_50 , __lowercase : float = 5.0 , __lowercase : int = 2_20 , ) -> Mapping[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] =OrderedDict() SCREAMING_SNAKE_CASE__ : Optional[int] =OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=__lowercase , framework=__lowercase , sampling_rate=__lowercase , time_duration=__lowercase , frequency=__lowercase , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoder_inputs['''input_features'''].shape[2] SCREAMING_SNAKE_CASE__ : List[Any] =encoder_sequence_length // 2 if self.use_past else seq_length SCREAMING_SNAKE_CASE__ : Any =super().generate_dummy_inputs( preprocessor.tokenizer , __lowercase , __lowercase , __lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] 
=encoder_inputs.pop('''input_features''' ) SCREAMING_SNAKE_CASE__ : List[str] =decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: SCREAMING_SNAKE_CASE__ : Union[str, Any] =decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def __magic_name__ ( self : Dict ) -> float: return 1e-3
721
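The defaults above (max_source_positions=1500, num_mel_bins=80) follow from Whisper's fixed 30-second audio window; assuming the standard Whisper feature extractor settings (16 kHz audio, hop length 160), the arithmetic works out as:

sampling_rate = 16_000  # Hz, standard Whisper feature extractor
hop_length = 160        # samples per mel frame
chunk_seconds = 30      # fixed audio window
mel_frames = chunk_seconds * sampling_rate // hop_length  # 3000
encoder_positions = mel_frames // 2  # the stride-2 conv halves the length
assert encoder_positions == 1500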
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = ["""vqvae"""] def __init__( self : int , __lowercase : AutoencoderKL , __lowercase : UNetaDConditionModel , __lowercase : Mel , __lowercase : Union[DDIMScheduler, DDPMScheduler] , ) -> int: super().__init__() self.register_modules(unet=__lowercase , scheduler=__lowercase , mel=__lowercase , vqvae=__lowercase ) def __magic_name__ ( self : List[str] ) -> int: return 50 if isinstance(self.scheduler , __lowercase ) else 10_00 @torch.no_grad() def __call__( self : Dict , __lowercase : int = 1 , __lowercase : str = None , __lowercase : np.ndarray = None , __lowercase : int = 0 , __lowercase : int = 0 , __lowercase : int = None , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : float = 0 , __lowercase : torch.Generator = None , __lowercase : float = 0 , __lowercase : torch.Tensor = None , __lowercase : torch.Tensor = None , __lowercase : Dict=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: SCREAMING_SNAKE_CASE__ : Optional[Any] =steps or self.get_default_steps() self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: SCREAMING_SNAKE_CASE__ : Union[str, Any] =(self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: SCREAMING_SNAKE_CASE__ : Optional[int] =randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__lowercase , device=self.device , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =noise SCREAMING_SNAKE_CASE__ : List[str] =None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =self.mel.audio_slice_to_image(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) SCREAMING_SNAKE_CASE__ : int =(input_image / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: SCREAMING_SNAKE_CASE__ : Optional[int] =self.vqvae.encode(torch.unsqueeze(__lowercase , 0 ) ).latent_dist.sample( generator=__lowercase )[0] SCREAMING_SNAKE_CASE__ : int =self.vqvae.config.scaling_factor * input_images if start_step > 0: SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , self.scheduler.timesteps[start_step - 1] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) SCREAMING_SNAKE_CASE__ : Optional[Any] =int(mask_start_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : Tuple =int(mask_end_secs * pixels_per_second ) SCREAMING_SNAKE_CASE__ : int =self.scheduler.add_noise(__lowercase , __lowercase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in 
enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __lowercase ): SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase , __lowercase )['''sample'''] else: SCREAMING_SNAKE_CASE__ : str =self.unet(__lowercase , __lowercase )['''sample'''] if isinstance(self.scheduler , __lowercase ): SCREAMING_SNAKE_CASE__ : Optional[Any] =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , eta=__lowercase , generator=__lowercase , )['''prev_sample'''] else: SCREAMING_SNAKE_CASE__ : Any =self.scheduler.step( model_output=__lowercase , timestep=__lowercase , sample=__lowercase , generator=__lowercase , )['''prev_sample'''] if mask is not None: if mask_start > 0: SCREAMING_SNAKE_CASE__ : Optional[Any] =mask[:, step, :, :mask_start] if mask_end > 0: SCREAMING_SNAKE_CASE__ : List[str] =mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance SCREAMING_SNAKE_CASE__ : str =1 / self.vqvae.config.scaling_factor * images SCREAMING_SNAKE_CASE__ : int =self.vqvae.decode(__lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : List[str] =(images / 2 + 0.5).clamp(0 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[Any] =images.cpu().permute(0 , 2 , 3 , 1 ).numpy() SCREAMING_SNAKE_CASE__ : Dict =(images * 2_55).round().astype('''uint8''' ) SCREAMING_SNAKE_CASE__ : Any =list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__lowercase , mode='''RGB''' ).convert('''L''' ) for _ in images) ) SCREAMING_SNAKE_CASE__ : Optional[int] =[self.mel.image_to_audio(__lowercase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__lowercase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__lowercase ) ) @torch.no_grad() def __magic_name__ ( self : Optional[int] , __lowercase : List[Image.Image] , __lowercase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __lowercase ) self.scheduler.set_timesteps(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) SCREAMING_SNAKE_CASE__ : str =(sample / 2_55) * 2 - 1 SCREAMING_SNAKE_CASE__ : str =torch.Tensor(__lowercase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): SCREAMING_SNAKE_CASE__ : Tuple =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps SCREAMING_SNAKE_CASE__ : Any =self.scheduler.alphas_cumprod[t] SCREAMING_SNAKE_CASE__ : int =( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) SCREAMING_SNAKE_CASE__ : int =1 - alpha_prod_t SCREAMING_SNAKE_CASE__ : Optional[Any] =self.unet(__lowercase , __lowercase )['''sample'''] SCREAMING_SNAKE_CASE__ : Union[str, Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output SCREAMING_SNAKE_CASE__ : str =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) SCREAMING_SNAKE_CASE__ : Any =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __magic_name__ ( __lowercase : torch.Tensor , __lowercase : torch.Tensor , __lowercase : float ) -> torch.Tensor: SCREAMING_SNAKE_CASE__ : Optional[int] =acos(torch.dot(torch.flatten(__lowercase ) , torch.flatten(__lowercase ) ) / torch.norm(__lowercase ) / torch.norm(__lowercase ) ) return sin((1 - alpha) * theta ) * 
xa / sin(__lowercase ) + sin(alpha * theta ) * xa / sin(__lowercase )
665
0
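The pipeline's final static method is spherical linear interpolation between two noise tensors; a standalone sketch of the same computation (the helper name is mine):

from math import acos, sin

import torch

def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened tensors, then interpolation along the
    # great circle: alpha=0 returns x0, alpha=1 returns x1. In practice you
    # may want to clamp the cosine to [-1, 1] before calling acos.
    theta = acos(
        torch.dot(torch.flatten(x0), torch.flatten(x1))
        / (torch.norm(x0) * torch.norm(x1))
    )
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)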
def _a( UpperCamelCase__ : str ):
    '''simple docstring'''
    return "".join(
        chr(ord(char) - 32) if "a" <= char <= "z" else char for char in UpperCamelCase__
    )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
700
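The conversion relies on the ASCII layout: each lowercase letter sits exactly 32 code points above its uppercase counterpart, so subtracting 32 from ord flips the case. For example:

assert ord("a") - ord("A") == 32
assert _a("hello World!") == "HELLO WORLD!"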
'''simple docstring'''
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
665
0
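A small worked case for the two-pointer count above: for max_number = 30 the sieve yields the primes [2, 3, 5, 7, 11, 13] below 15, and the loop counts 6 + 3 + 1 = 10 pairs with p <= q and p * q < 30.

assert solution(30) == 10  # semiprimes below 30: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26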
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
701
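The _LazyModule registration above keeps importing the package cheap: the framework-specific submodules are only imported when one of their attributes is first accessed. Roughly:

import transformers.models.encoder_decoder as enc_dec  # no torch/TF/Flax import yet

model_cls = enc_dec.EncoderDecoderModel  # first attribute access triggers the real import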
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = SpeechTaTokenizer snake_case_ = False snake_case_ = True def __magic_name__ ( self : int ) -> Any: super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Optional[Any] =SpeechTaTokenizer(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =AddedToken('''<mask>''' , lstrip=__lowercase , rstrip=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self : Dict , __lowercase : int ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] ='''this is a test''' SCREAMING_SNAKE_CASE__ : int ='''this is a test''' return input_text, output_text def __magic_name__ ( self : List[Any] , __lowercase : int , __lowercase : Optional[Any]=False , __lowercase : Union[str, Any]=20 , __lowercase : Any=5 ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.get_input_output_texts(__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : str =tokenizer.decode(__lowercase , clean_up_tokenization_spaces=__lowercase ) return text, ids def __magic_name__ ( self : Dict ) -> str: SCREAMING_SNAKE_CASE__ : Optional[int] ='''<pad>''' SCREAMING_SNAKE_CASE__ : Optional[int] =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase ) def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[Any] =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__lowercase ) , 81 ) def __magic_name__ ( self : Dict ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __magic_name__ ( self : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ : str =self.get_tokenizers(do_lower_case=__lowercase ) for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Any =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) SCREAMING_SNAKE_CASE__ : int =['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.add_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) 
self.assertEqual(__lowercase , all_size + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) SCREAMING_SNAKE_CASE__ : str ={'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} SCREAMING_SNAKE_CASE__ : int =tokenizer.add_special_tokens(__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.vocab_size SCREAMING_SNAKE_CASE__ : int =len(__lowercase ) self.assertNotEqual(__lowercase , 0 ) self.assertEqual(__lowercase , __lowercase ) self.assertEqual(__lowercase , len(__lowercase ) ) self.assertEqual(__lowercase , all_size_a + len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__lowercase ) self.assertGreaterEqual(len(__lowercase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __magic_name__ ( self : Optional[Any] ) -> Any: pass def __magic_name__ ( self : List[str] ) -> List[Any]: pass def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__lowercase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowercase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.convert_tokens_to_ids(__lowercase ) # fmt: off self.assertListEqual(__lowercase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.convert_ids_to_tokens(__lowercase ) self.assertListEqual( __lowercase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def __magic_name__ ( self : List[str] ) -> List[str]: # Use custom sequence because this tokenizer does not handle numbers. 
SCREAMING_SNAKE_CASE__ : List[Any] =[ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off SCREAMING_SNAKE_CASE__ : str ={ '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowercase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__lowercase , )
665
0
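As the assertions above show, this tokenizer is character-level on top of SentencePiece: words are split into single characters, with '▁' (SPIECE_UNDERLINE) marking word boundaries. A sketch, assuming a tokenizer loaded as in setUp:

tokens = tokenizer.tokenize("This is a test")
# -> ['▁', 'T', 'h', 'i', 's', '▁', 'i', 's', '▁', 'a', '▁', 't', 'e', 's', 't']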
'''simple docstring'''
def _a(UpperCamelCase__: dict) -> bool:
    '''simple docstring'''
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(UpperCamelCase__, node, visited, rec_stk)
        for node in UpperCamelCase__
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    '''simple docstring'''
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
702
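A quick usage check of the detector above, with one cyclic and one acyclic directed graph:

cyclic = {0: [1], 1: [2], 2: [0]}   # 0 -> 1 -> 2 -> 0 closes a cycle
acyclic = {0: [1], 1: [2], 2: []}
assert _a(cyclic)
assert not _a(acyclic)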
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , __lowercase : Optional[int] , __lowercase : str=13 , __lowercase : int=10 , __lowercase : List[Any]=3 , __lowercase : List[str]=2 , __lowercase : int=2 , __lowercase : Dict=True , __lowercase : Optional[Any]=True , __lowercase : int=32 , __lowercase : List[Any]=5 , __lowercase : Union[str, Any]=4 , __lowercase : Any=37 , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=0.1 , __lowercase : Tuple=0.1 , __lowercase : Dict=10 , __lowercase : int=0.02 , __lowercase : str="divided_space_time" , __lowercase : Union[str, Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =parent SCREAMING_SNAKE_CASE__ : List[str] =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =num_channels SCREAMING_SNAKE_CASE__ : int =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_frames SCREAMING_SNAKE_CASE__ : List[Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =use_labels SCREAMING_SNAKE_CASE__ : Optional[Any] =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : int =num_attention_heads SCREAMING_SNAKE_CASE__ : Tuple =intermediate_size SCREAMING_SNAKE_CASE__ : List[str] =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] =attention_type SCREAMING_SNAKE_CASE__ : Union[str, Any] =initializer_range SCREAMING_SNAKE_CASE__ : Any =scope SCREAMING_SNAKE_CASE__ : int =num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token SCREAMING_SNAKE_CASE__ : List[str] =(image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE__ : str =(num_frames) * self.num_patches_per_frame + 1 def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : List[Any] =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : int =self.get_config() return config, pixel_values, labels def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels return config def __magic_name__ ( self : int , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TimesformerModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self : List[str] , __lowercase : str , __lowercase : Tuple , __lowercase : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =TimesformerForVideoClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) # verify the logits shape SCREAMING_SNAKE_CASE__ : Tuple =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , __lowercase ) def __magic_name__ ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ : int =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =config_and_inputs SCREAMING_SNAKE_CASE__ : Any ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () snake_case_ = ( {"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification} if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : str ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Dict =TimesformerModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] =ConfigTester( self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37 ) def __magic_name__ ( self : str , __lowercase : Dict , __lowercase : str , __lowercase : Optional[int]=False ) -> int: SCREAMING_SNAKE_CASE__ : str =copy.deepcopy(__lowercase ) if return_labels: if model_class in get_values(__lowercase ): SCREAMING_SNAKE_CASE__ : Any =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowercase ) return inputs_dict def __magic_name__ ( self : List[Any] ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def __magic_name__ ( self : List[Any] ) -> Optional[int]: pass def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =model_class(__lowercase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ : Any =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowercase , nn.Linear ) ) def __magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : str =inspect.signature(model.forward ) # 
signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[str] =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Dict =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) def __magic_name__ ( self : int ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =TimesformerModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> List[str]: if not self.has_attentions: pass else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : List[Any] =True for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.seq_length SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.model_tester.num_frames SCREAMING_SNAKE_CASE__ : Optional[Any] =True SCREAMING_SNAKE_CASE__ : str =False SCREAMING_SNAKE_CASE__ : Tuple =True SCREAMING_SNAKE_CASE__ : Dict =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE__ : List[Any] =True SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Any =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) SCREAMING_SNAKE_CASE__ : Optional[int] =len(__lowercase ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE__ : Optional[int] =True SCREAMING_SNAKE_CASE__ : Union[str, Any] =True SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(**self._prepare_for_class(__lowercase , __lowercase ) ) self.assertEqual(out_len + 1 , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : List[str] =outputs.attentions self.assertEqual(len(__lowercase ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __magic_name__ ( self : Tuple ) -> List[Any]: def check_hidden_states_output(__lowercase : Tuple , 
__lowercase : Dict , __lowercase : Optional[int] ): SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : int =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowercase ) , __lowercase ) SCREAMING_SNAKE_CASE__ : int =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : List[str] =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) SCREAMING_SNAKE_CASE__ : Any =np.load(UpperCamelCase__ ) return list(UpperCamelCase__ ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : Any ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __magic_name__ ( self : Any ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : int =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =self.default_image_processor SCREAMING_SNAKE_CASE__ : Tuple =prepare_video() SCREAMING_SNAKE_CASE__ : Any =image_processor(video[:8] , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Optional[int] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : List[str] =torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) )
665
0
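For readability, here is a hedged reconstruction of the cycle-detection routine stored in the `code` cell of the row above. The dump reuses placeholder names (`_a`, `UpperCamelCase__`), so the cell is not runnable as printed; the function and variable names below (`has_cycle`, `depth_first_search`, `rec_stack`) are my own de-obfuscation, not part of the dataset.

def has_cycle(graph: dict) -> bool:
    # Return True if the directed graph (an adjacency dict) contains a cycle.
    visited: set = set()
    # To detect a back edge, keep track of vertices currently on the recursion stack.
    rec_stack: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stack)
        for node in graph
    )

def depth_first_search(graph: dict, vertex, visited: set, rec_stack: set) -> bool:
    visited.add(vertex)
    rec_stack.add(vertex)
    for neighbour in graph[vertex]:
        if neighbour not in visited:
            if depth_first_search(graph, neighbour, visited, rec_stack):
                return True
        elif neighbour in rec_stack:
            return True  # back edge: the neighbour is an ancestor on the current path
    rec_stack.remove(vertex)  # leave the current DFS path before returning
    return False

assert has_cycle({0: [1], 1: [2], 2: [0]}) is True
assert has_cycle({0: [1], 1: [2], 2: []}) is False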
'''simple docstring''' def _a( UpperCamelCase__ : dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =set() # edges = set of the graph's edges SCREAMING_SNAKE_CASE__ : str =get_edges(UpperCamelCase__ ) # While there are still edges left, take an arbitrary edge # (from_node, to_node), add both of its endpoints to chosen_vertices, and then # remove every edge adjacent to from_node or to_node while edges: SCREAMING_SNAKE_CASE__ : Optional[int] =edges.pop() chosen_vertices.add(UpperCamelCase__ ) chosen_vertices.add(UpperCamelCase__ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(UpperCamelCase__ ) return chosen_vertices def _a( UpperCamelCase__ : dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
703
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt' ), 'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt', 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt' ), 'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt' ), 'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt', 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json', 'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json', 'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json', 'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json', 'bert-base-multilingual-uncased': ( 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json' ), 'bert-base-multilingual-cased': ( 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json' ), 'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json', 'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json', 'bert-large-uncased-whole-word-masking': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking': ( 'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json' ), 
'bert-large-uncased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-large-cased-whole-word-masking-finetuned-squad': ( 'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json' ), 'bert-base-cased-finetuned-mrpc': ( 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-cased': ( 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json' ), 'bert-base-german-dbmdz-uncased': ( 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-cased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json' ), 'TurkuNLP/bert-base-finnish-uncased-v1': ( 'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json' ), 'wietsedv/bert-base-dutch-cased': ( 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json' ), }, } a_ = { 'bert-base-uncased': 5_1_2, 'bert-large-uncased': 5_1_2, 'bert-base-cased': 5_1_2, 'bert-large-cased': 5_1_2, 'bert-base-multilingual-uncased': 5_1_2, 'bert-base-multilingual-cased': 5_1_2, 'bert-base-chinese': 5_1_2, 'bert-base-german-cased': 5_1_2, 'bert-large-uncased-whole-word-masking': 5_1_2, 'bert-large-cased-whole-word-masking': 5_1_2, 'bert-large-uncased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-large-cased-whole-word-masking-finetuned-squad': 5_1_2, 'bert-base-cased-finetuned-mrpc': 5_1_2, 'bert-base-german-dbmdz-cased': 5_1_2, 'bert-base-german-dbmdz-uncased': 5_1_2, 'TurkuNLP/bert-base-finnish-cased-v1': 5_1_2, 'TurkuNLP/bert-base-finnish-uncased-v1': 5_1_2, 'wietsedv/bert-base-dutch-cased': 5_1_2, } a_ = { 'bert-base-uncased': {'do_lower_case': True}, 'bert-large-uncased': {'do_lower_case': True}, 'bert-base-cased': {'do_lower_case': False}, 'bert-large-cased': {'do_lower_case': False}, 'bert-base-multilingual-uncased': {'do_lower_case': True}, 'bert-base-multilingual-cased': {'do_lower_case': False}, 'bert-base-chinese': {'do_lower_case': False}, 'bert-base-german-cased': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking': {'do_lower_case': False}, 'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True}, 'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False}, 'bert-base-cased-finetuned-mrpc': {'do_lower_case': False}, 'bert-base-german-dbmdz-cased': {'do_lower_case': False}, 'bert-base-german-dbmdz-uncased': {'do_lower_case': True}, 'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False}, 'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True}, 'wietsedv/bert-base-dutch-cased': {'do_lower_case': False}, } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_INIT_CONFIGURATION snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = BertTokenizer def __init__( self : int , __lowercase : Union[str, Any]=None , __lowercase : Tuple=None , __lowercase : str=True , __lowercase : Optional[Any]="[UNK]" , __lowercase : Tuple="[SEP]" , __lowercase : Any="[PAD]" , __lowercase : List[Any]="[CLS]" , __lowercase : Union[str, Any]="[MASK]" , __lowercase : Tuple=True , __lowercase : str=None , **__lowercase : Any , ) -> Optional[Any]: super().__init__( 
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : str =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __lowercase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __lowercase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __lowercase ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ : int =getattr(__lowercase , normalizer_state.pop('''type''' ) ) SCREAMING_SNAKE_CASE__ : Any =do_lower_case SCREAMING_SNAKE_CASE__ : Any =strip_accents SCREAMING_SNAKE_CASE__ : Dict =tokenize_chinese_chars SCREAMING_SNAKE_CASE__ : Union[str, Any] =normalizer_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =do_lower_case def __magic_name__ ( self : int , __lowercase : Optional[Any] , __lowercase : Union[str, Any]=None ) -> int: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__ ( self : Union[str, Any] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : List[str] =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__ ( self : List[Any] , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =self._tokenizer.model.save(__lowercase , name=__lowercase ) return tuple(__lowercase )
665
0
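The row above stores a greedy 2-approximation for minimum vertex cover; the placeholder names hide the data flow (for instance, `edges.pop()` originally unpacked into `from_node, to_node`). A minimal runnable sketch, with identifiers of my own choosing:

def get_edges(graph: dict) -> set:
    # Collect every (from_node, to_node) pair from the adjacency dict.
    return {(u, v) for u, vs in graph.items() for v in vs}

def matching_min_vertex_cover(graph: dict) -> set:
    # Greedy 2-approximation: repeatedly pick an uncovered edge and
    # add both of its endpoints to the cover.
    chosen_vertices = set()
    edges = get_edges(graph)
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # Drop every edge already covered by the two chosen endpoints.
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices

# Example: every edge of the graph touches at least one chosen vertex.
graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u, v in get_edges(graph))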
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : str=None, UpperCamelCase__ : List[Any]=None ): '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(tf.math.not_equal(UpperCamelCase__, config.pad_token_id ), tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class __SCREAMING_SNAKE_CASE : snake_case_ = OPTConfig snake_case_ = {} snake_case_ = """gelu""" def __init__( self : Optional[Any] , __lowercase : str , __lowercase : Tuple=13 , __lowercase : Optional[Any]=7 , __lowercase : List[str]=True , __lowercase : Any=False , __lowercase : Union[str, Any]=99 , __lowercase : str=16 , __lowercase : Union[str, Any]=2 , __lowercase : Tuple=4 , __lowercase : Dict=4 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.1 , __lowercase : Tuple=0.1 , __lowercase : Any=20 , __lowercase : Any=2 , __lowercase : List[Any]=1 , __lowercase : Any=0 , __lowercase : Any=16 , __lowercase : Dict=16 , ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : str =parent SCREAMING_SNAKE_CASE__ : str =batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =seq_length SCREAMING_SNAKE_CASE__ : Optional[int] =is_training SCREAMING_SNAKE_CASE__ : str =use_labels SCREAMING_SNAKE_CASE__ : Any =vocab_size SCREAMING_SNAKE_CASE__ : str =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Optional[int] =num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] =intermediate_size SCREAMING_SNAKE_CASE__ : int =hidden_act SCREAMING_SNAKE_CASE__ : Dict =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =max_position_embeddings SCREAMING_SNAKE_CASE__ : Tuple =eos_token_id SCREAMING_SNAKE_CASE__ : Tuple =pad_token_id SCREAMING_SNAKE_CASE__ : Optional[int] =bos_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] =embed_dim SCREAMING_SNAKE_CASE__ : Dict =word_embed_proj_dim SCREAMING_SNAKE_CASE__ : Union[str, Any] =False def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : int =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE__ : Any =tf.concat([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE__ : List[Any] =self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__lowercase , **self.config_updates , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] 
=prepare_opt_inputs_dict(__lowercase , __lowercase ) return config, inputs_dict def __magic_name__ ( self : str , __lowercase : Optional[int] , __lowercase : Any ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTModel(config=__lowercase ) SCREAMING_SNAKE_CASE__ : int =inputs_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : Optional[Any] =input_ids[:1, :] SCREAMING_SNAKE_CASE__ : Union[str, Any] =inputs_dict['''attention_mask'''][:1, :] SCREAMING_SNAKE_CASE__ : Optional[int] =1 # first forward pass SCREAMING_SNAKE_CASE__ : Optional[int] =model(__lowercase , attention_mask=__lowercase , use_cache=__lowercase ) SCREAMING_SNAKE_CASE__ : str =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : Any =ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : List[str] =tf.concat([input_ids, next_tokens] , axis=-1 ) SCREAMING_SNAKE_CASE__ : Optional[int] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase , attention_mask=__lowercase )[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase , attention_mask=__lowercase , past_key_values=__lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : List[Any] =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : str =output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : int =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowercase , __lowercase , rtol=1e-3 ) @require_tf class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () snake_case_ = (TFOPTForCausalLM,) if is_tf_available() else () snake_case_ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = 10 def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTModelTester(self ) SCREAMING_SNAKE_CASE__ : Any =ConfigTester(self , config_class=__lowercase ) def __magic_name__ ( self : str ) -> Tuple: self.config_tester.run_common_tests() def __magic_name__ ( self : Union[str, Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : str =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(__lowercase : List[str] , __lowercase : Union[str, Any] ): if hasattr(__lowercase , '''weight''' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. 
model.build() if hasattr(__lowercase , '''weight''' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings SCREAMING_SNAKE_CASE__ : str =model_class(config=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =_get_word_embedding_weight(__lowercase , model.get_input_embeddings() ) SCREAMING_SNAKE_CASE__ : int =_get_word_embedding_weight(__lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =_get_word_embedding_weight(__lowercase , model.get_input_embeddings() ) SCREAMING_SNAKE_CASE__ : int =_get_word_embedding_weight(__lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. SCREAMING_SNAKE_CASE__ : List[Any] =size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , __lowercase ) # check that weights remain the same after resizing SCREAMING_SNAKE_CASE__ : Union[str, Any] =True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE__ : List[str] =False self.assertTrue(__lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE__ : Any =False self.assertTrue(__lowercase ) def _a( UpperCamelCase__ : Dict ): '''simple docstring''' return tf.constant(UpperCamelCase__, dtype=tf.intaa ) @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = 99 def __magic_name__ ( self : Dict ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str =tf.ones((4, 1) , dtype=tf.intaa ) * 2 SCREAMING_SNAKE_CASE__ : Any =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) SCREAMING_SNAKE_CASE__ : List[str] =input_ids.shape[0] SCREAMING_SNAKE_CASE__ : Dict =OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __magic_name__ ( self : Optional[Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =TFOPTModel.from_pretrained('''facebook/opt-350m''' ) SCREAMING_SNAKE_CASE__ : Any =_long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) SCREAMING_SNAKE_CASE__ : Tuple =tf.not_equal(__lowercase , model.config.pad_token_id ) with tf.GradientTape(): SCREAMING_SNAKE_CASE__ : List[str] =model(input_ids=__lowercase , attention_mask=__lowercase ).last_hidden_state SCREAMING_SNAKE_CASE__ : Optional[int] =(1, 11, 5_12) self.assertEqual(output.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=4e-3 ) ) SCREAMING_SNAKE_CASE__ : str =tf.function(__lowercase , jit_compile=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =xla_generate(__lowercase , __lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , __lowercase , atol=4e-2 ) ) @require_tf 
@slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : Union[str, Any] ) -> List[str]: super().setUp() SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''facebook/opt-350m''' def __magic_name__ ( self : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTForCausalLM.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =GPTaTokenizer.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE__ : Dict =[ '''Today is a beautiful day and I want to''', '''In the city of''', '''Paris is the capital of France and''', '''Computers and mobile phones have taken''', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer(__lowercase , return_tensors='''tf''' , padding=__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) SCREAMING_SNAKE_CASE__ : Tuple =tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-4 ) ) SCREAMING_SNAKE_CASE__ : Tuple =tf.function(__lowercase , jit_compile=__lowercase ) SCREAMING_SNAKE_CASE__ : int =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-4 ) ) @require_tf @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __magic_name__ ( self : List[str] ) -> Optional[int]: return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : int ='''facebook/opt-125m''' SCREAMING_SNAKE_CASE__ : Any =[ '''Today is a beautiful day and I want to''', '''In the city of New York, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] SCREAMING_SNAKE_CASE__ : List[str] =[] SCREAMING_SNAKE_CASE__ : Tuple =GPTaTokenizer.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =TFOPTForCausalLM.from_pretrained(__lowercase ) for prompt in self.prompts: SCREAMING_SNAKE_CASE__ : Dict =tokenizer(__lowercase , return_tensors='''tf''' ).input_ids SCREAMING_SNAKE_CASE__ : Dict =model.generate(__lowercase , max_length=10 ) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase ) predicted_outputs += generated_string self.assertListEqual(__lowercase , __lowercase ) def __magic_name__ ( self : Dict ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Any ='''facebook/opt-350m''' SCREAMING_SNAKE_CASE__ : List[Any] =GPTaTokenizer.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =TFOPTForCausalLM.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Any ='''left''' # use different length sentences to test batching SCREAMING_SNAKE_CASE__ : Optional[int] =[ '''Hello, my dog is a little''', '''Today, I''', ] SCREAMING_SNAKE_CASE__ : int =tokenizer(__lowercase , return_tensors='''tf''' , padding=__lowercase ) SCREAMING_SNAKE_CASE__ : 
Optional[Any] =inputs['''input_ids'''] SCREAMING_SNAKE_CASE__ : Dict =model.generate(input_ids=__lowercase , attention_mask=inputs['''attention_mask'''] ) SCREAMING_SNAKE_CASE__ : Any =tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids SCREAMING_SNAKE_CASE__ : List[str] =model.generate(input_ids=__lowercase ) SCREAMING_SNAKE_CASE__ : int =inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) ) SCREAMING_SNAKE_CASE__ : int =tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids SCREAMING_SNAKE_CASE__ : Any =model.generate(input_ids=__lowercase , max_length=model.config.max_length - num_paddings ) SCREAMING_SNAKE_CASE__ : Any =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.decode(output_padded[0] , skip_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =[ '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''', '''Today, I was in the middle of a conversation with a friend about the''', ] self.assertListEqual(__lowercase , __lowercase ) self.assertListEqual(__lowercase , [non_padded_sentence, padded_sentence] ) def __magic_name__ ( self : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[str] ='''facebook/opt-350m''' SCREAMING_SNAKE_CASE__ : List[str] =[ '''Today is a beautiful day and I want to''', '''In the city of San Francisco, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] SCREAMING_SNAKE_CASE__ : int =[] SCREAMING_SNAKE_CASE__ : Union[str, Any] =GPTaTokenizer.from_pretrained(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =TFOPTForCausalLM.from_pretrained(__lowercase ) for prompt in self.prompts: SCREAMING_SNAKE_CASE__ : str =tokenizer(__lowercase , return_tensors='''tf''' ).input_ids SCREAMING_SNAKE_CASE__ : int =model.generate(__lowercase , max_length=10 ) SCREAMING_SNAKE_CASE__ : int =tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase ) predicted_outputs += generated_string self.assertListEqual(__lowercase , __lowercase )
704
'''simple docstring''' import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a_ = False a_ = False def _a( UpperCamelCase__ : Namespace ): '''simple docstring''' return TrainCommand(UpperCamelCase__ ) class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): @staticmethod def __magic_name__ ( __lowercase : ArgumentParser ) -> Any: SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' ) train_parser.add_argument( '''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , ) train_parser.add_argument( '''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' ) train_parser.add_argument( '''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' ) train_parser.add_argument( '''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' ) train_parser.add_argument( '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' ) train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' ) train_parser.add_argument( '''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , ) train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' ) train_parser.add_argument( '''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' ) train_parser.add_argument( '''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' ) train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' ) train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' ) train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' ) train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' ) train_parser.set_defaults(func=__lowercase ) def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' ) SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch''' os.makedirs(args.output , exist_ok=__lowercase ) SCREAMING_SNAKE_CASE__ : Any =args.output SCREAMING_SNAKE_CASE__ : str =args.column_label SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text SCREAMING_SNAKE_CASE__ : Tuple =args.column_id self.logger.info(F"Loading {args.task} pipeline for {args.model}" ) if args.task == "text_classification": SCREAMING_SNAKE_CASE__ : List[str] 
=TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F"Loading dataset from {args.train_data}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =None if args.validation_data: self.logger.info(F"Loading validation dataset from {args.validation_data}" ) SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon def __magic_name__ ( self : Any ) -> str: if self.framework == "tf": return self.run_tf() return self.run_torch() def __magic_name__ ( self : Optional[int] ) -> Tuple: raise NotImplementedError def __magic_name__ ( self : Dict ) -> List[Any]: self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
665
0
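The `code` cell of the row above is a TF-OPT test suite whose small helper builds an attention mask from padding. Note the dump's letter-for-digit mangling of dtypes (`tf.inta`, `tf.intaa`); a plausible reading of `tf.inta` is `tf.int8`. A de-obfuscated sketch of that helper, under the assumption that `config` carries a `pad_token_id`:

import tensorflow as tf

def prepare_opt_inputs_dict(config, input_ids, attention_mask=None):
    # A mask position is 1 wherever the token is not padding, 0 at padding.
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}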
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _a( UpperCamelCase__ : Features ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =np.inf def set_batch_size(UpperCamelCase__ : FeatureType ) -> None: nonlocal batch_size if isinstance(UpperCamelCase__, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : str =min(UpperCamelCase__, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(UpperCamelCase__, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : int =min(UpperCamelCase__, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(UpperCamelCase__, UpperCamelCase__ ) and feature.dtype == "binary": SCREAMING_SNAKE_CASE__ : Dict =min(UpperCamelCase__, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(UpperCamelCase__, UpperCamelCase__ ) return None if batch_size is np.inf else batch_size class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __init__( self : Any , __lowercase : NestedDataStructureLike[PathLike] , __lowercase : Optional[NamedSplit] = None , __lowercase : Optional[Features] = None , __lowercase : str = None , __lowercase : bool = False , __lowercase : bool = False , __lowercase : Optional[int] = None , **__lowercase : str , ) -> Optional[int]: super().__init__( __lowercase , split=__lowercase , features=__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase , streaming=__lowercase , num_proc=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : Dict =path_or_paths if isinstance(__lowercase , __lowercase ) else {self.split: path_or_paths} SCREAMING_SNAKE_CASE__ : Union[str, Any] =_PACKAGED_DATASETS_MODULES['''parquet'''][1] SCREAMING_SNAKE_CASE__ : List[str] =Parquet( cache_dir=__lowercase , data_files=__lowercase , features=__lowercase , hash=__lowercase , **__lowercase , ) def __magic_name__ ( self : Optional[Any] ) -> List[Any]: # Build iterable dataset if self.streaming: SCREAMING_SNAKE_CASE__ : Tuple =self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: SCREAMING_SNAKE_CASE__ : Any =None SCREAMING_SNAKE_CASE__ : Any =None SCREAMING_SNAKE_CASE__ : Tuple =None SCREAMING_SNAKE_CASE__ : Tuple =None self.builder.download_and_prepare( download_config=__lowercase , download_mode=__lowercase , verification_mode=__lowercase , base_path=__lowercase , num_proc=self.num_proc , ) SCREAMING_SNAKE_CASE__ : Dict =self.builder.as_dataset( split=self.split , verification_mode=__lowercase , in_memory=self.keep_in_memory ) return dataset class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , __lowercase : Dataset , __lowercase : Union[PathLike, BinaryIO] , __lowercase : Optional[int] = None , **__lowercase : int , ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =dataset SCREAMING_SNAKE_CASE__ : str =path_or_buf SCREAMING_SNAKE_CASE__ : int =batch_size or get_writer_batch_size(dataset.features ) SCREAMING_SNAKE_CASE__ : Optional[Any] =parquet_writer_kwargs def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : Tuple =self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if 
isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: SCREAMING_SNAKE_CASE__ : Optional[int] =self._write(file_obj=__lowercase , batch_size=__lowercase , **self.parquet_writer_kwargs ) else: SCREAMING_SNAKE_CASE__ : int =self._write(file_obj=self.path_or_buf , batch_size=__lowercase , **self.parquet_writer_kwargs ) return written def __magic_name__ ( self : List[Any] , __lowercase : BinaryIO , __lowercase : int , **__lowercase : Tuple ) -> int: SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Optional[int] =parquet_writer_kwargs.pop('''path_or_buf''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dataset.features.arrow_schema SCREAMING_SNAKE_CASE__ : List[Any] =pq.ParquetWriter(__lowercase , schema=__lowercase , **__lowercase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): SCREAMING_SNAKE_CASE__ : Dict =query_table( table=self.dataset._data , key=slice(__lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__lowercase ) written += batch.nbytes writer.close() return written
705
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaImgaImgPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """image"""] snake_case_ = [ """image_embeds""", """negative_image_embeds""", """image""", ] snake_case_ = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __magic_name__ ( self : List[str] ) -> Tuple: return 32 @property def __magic_name__ ( self : List[str] ) -> str: return 32 @property def __magic_name__ ( self : Any ) -> Optional[int]: return self.time_input_dim @property def __magic_name__ ( self : List[Any] ) -> int: return self.time_input_dim * 4 @property def __magic_name__ ( self : Tuple ) -> Optional[int]: return 1_00 @property def __magic_name__ ( self : Union[str, Any] ) -> Tuple: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(**__lowercase ) return model @property def __magic_name__ ( self : Dict ) -> Any: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __magic_name__ ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] =VQModel(**self.dummy_movq_kwargs ) return model def __magic_name__ ( self : str ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[str] =self.dummy_unet SCREAMING_SNAKE_CASE__ : Optional[Any] =self.dummy_movq SCREAMING_SNAKE_CASE__ : Optional[Any] ={ '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } SCREAMING_SNAKE_CASE__ : str =DDIMScheduler(**__lowercase ) SCREAMING_SNAKE_CASE__ 
: Any ={ '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __magic_name__ ( self : str , __lowercase : Optional[Any] , __lowercase : Any=0 ) -> int: SCREAMING_SNAKE_CASE__ : Optional[int] =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __lowercase ) # create init_image SCREAMING_SNAKE_CASE__ : Optional[Any] =floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowercase ) ).to(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : Any =Image.fromarray(np.uinta(__lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) if str(__lowercase ).startswith('''mps''' ): SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(__lowercase ) else: SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device=__lowercase ).manual_seed(__lowercase ) SCREAMING_SNAKE_CASE__ : str ={ '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def __magic_name__ ( self : int ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[Any] ='''cpu''' SCREAMING_SNAKE_CASE__ : Tuple =self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Dict =self.pipeline_class(**__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] =pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =pipe(**self.get_dummy_inputs(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Tuple =output.images SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipe( **self.get_dummy_inputs(__lowercase ) , return_dict=__lowercase , )[0] SCREAMING_SNAKE_CASE__ : List[Any] =image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : List[str] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE__ : Tuple =np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : int ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE__ : List[Any] ='''A red cartoon frog, 4k''' SCREAMING_SNAKE_CASE__ : Optional[int] =KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) 
SCREAMING_SNAKE_CASE__ : Dict =pipeline.to(__lowercase ) pipeline.set_progress_bar_config(disable=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =pipe_prior( __lowercase , generator=__lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( image=__lowercase , image_embeds=__lowercase , negative_image_embeds=__lowercase , generator=__lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , ) SCREAMING_SNAKE_CASE__ : int =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__lowercase , __lowercase )
665
0
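The `code` cell of the row above implements the `datasets` Parquet reader/writer; the core of its `_write` method is a batched `pyarrow.parquet.ParquetWriter` loop. A stripped-down sketch over a plain `pyarrow.Table` (the function name is mine, and the progress-bar logging of the original is omitted):

import pyarrow as pa
import pyarrow.parquet as pq

def write_table_in_batches(table: pa.Table, file_obj, batch_size: int) -> int:
    # Stream `table` to `file_obj` in row-group-sized slices; return bytes written.
    written = 0
    writer = pq.ParquetWriter(file_obj, schema=table.schema)
    for offset in range(0, len(table), batch_size):
        batch = table.slice(offset, batch_size)
        writer.write_table(batch)
        written += batch.nbytes
    writer.close()
    return written

# Usage sketch: write_table_in_batches(pa.table({"x": list(range(10))}), "out.parquet", batch_size=4)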
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json', # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """pegasus""" snake_case_ = ["""past_key_values"""] snake_case_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self : Optional[Any] , __lowercase : List[Any]=5_02_65 , __lowercase : Union[str, Any]=10_24 , __lowercase : Optional[Any]=12 , __lowercase : List[str]=40_96 , __lowercase : int=16 , __lowercase : List[Any]=12 , __lowercase : Any=40_96 , __lowercase : List[str]=16 , __lowercase : Any=0.0 , __lowercase : Union[str, Any]=0.0 , __lowercase : str=True , __lowercase : Dict=True , __lowercase : Optional[Any]="gelu" , __lowercase : Union[str, Any]=10_24 , __lowercase : Optional[int]=0.1 , __lowercase : List[Any]=0.0 , __lowercase : Any=0.0 , __lowercase : Tuple=0.02 , __lowercase : Union[str, Any]=0 , __lowercase : Tuple=False , __lowercase : str=0 , __lowercase : Optional[int]=1 , __lowercase : List[Any]=1 , **__lowercase : Tuple , ) -> Dict: SCREAMING_SNAKE_CASE__ : int =vocab_size SCREAMING_SNAKE_CASE__ : Optional[Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : List[str] =d_model SCREAMING_SNAKE_CASE__ : Tuple =encoder_ffn_dim SCREAMING_SNAKE_CASE__ : str =encoder_layers SCREAMING_SNAKE_CASE__ : List[str] =encoder_attention_heads SCREAMING_SNAKE_CASE__ : Any =decoder_ffn_dim SCREAMING_SNAKE_CASE__ : Optional[Any] =decoder_layers SCREAMING_SNAKE_CASE__ : Any =decoder_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] =dropout SCREAMING_SNAKE_CASE__ : Optional[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : Any =activation_dropout SCREAMING_SNAKE_CASE__ : List[Any] =activation_function SCREAMING_SNAKE_CASE__ : str =init_std SCREAMING_SNAKE_CASE__ : Optional[int] =encoder_layerdrop SCREAMING_SNAKE_CASE__ : Optional[int] =decoder_layerdrop SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_cache SCREAMING_SNAKE_CASE__ : Union[str, Any] =encoder_layers SCREAMING_SNAKE_CASE__ : Optional[Any] =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , ) @property def __magic_name__ ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def __magic_name__ ( self : List[str] ) -> int: return self.d_model
706
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar a_ = TypeVar('T') class __SCREAMING_SNAKE_CASE ( Generic[T] ): snake_case_ = 42 # Cache store of keys snake_case_ = 42 # References of the keys in cache snake_case_ = 10 # Maximum capacity of cache def __init__( self : Dict , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : Any =deque() SCREAMING_SNAKE_CASE__ : str =set() if not n: SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize elif n < 0: raise ValueError('''n should be an integer greater than 0.''' ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =n def __magic_name__ ( self : List[str] , __lowercase : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop() self.key_reference.remove(__lowercase ) else: self.dq_store.remove(__lowercase ) self.dq_store.appendleft(__lowercase ) self.key_reference.add(__lowercase ) def __magic_name__ ( self : Union[str, Any] ) -> None: for k in self.dq_store: print(__lowercase ) def __repr__( self : List[Any] ) -> str: return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}" if __name__ == "__main__": import doctest doctest.testmod() a_ = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
'''simple docstring''' from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : str =parser.add_subparsers(help='''accelerate command helpers''' ) # Register commands get_config_parser(subparsers=UpperCamelCase__ ) env_command_parser(subparsers=UpperCamelCase__ ) launch_command_parser(subparsers=UpperCamelCase__ ) tpu_command_parser(subparsers=UpperCamelCase__ ) test_command_parser(subparsers=UpperCamelCase__ ) # Let's go SCREAMING_SNAKE_CASE__ : Dict =parser.parse_args() if not hasattr(UpperCamelCase__, '''func''' ): parser.print_help() exit(1 ) # Run args.func(UpperCamelCase__ ) if __name__ == "__main__": main()
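# Dispatch sketch (illustrative; calling the entry point by the name the
# __main__ guard above uses is an assumption). Each subparser registers its
# handler on the parsed args as `func`, so simulating `accelerate env` on argv
# resolves and runs that handler.
import sys

sys.argv = ["accelerate", "env"]  # as if `accelerate env` were typed in a shell
main()                            # parse_args() selects the env subcommand; args.func prints the environment report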
'''simple docstring''' from __future__ import annotations from collections.abc import Callable a_ = list[list[float | int]] def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : float for row in range(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col] SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0] SCREAMING_SNAKE_CASE__ : Any =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =0 while row < size and col < size: # pivoting SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row] for rowa in range(row + 1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col] SCREAMING_SNAKE_CASE__ : Tuple =0 for cola in range(col + 1, size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1, UpperCamelCase__ ): for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col] for cola in range(UpperCamelCase__, size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ ) ] def _a( UpperCamelCase__ : list[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Matrix SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : int for x_val, y_val in enumerate(UpperCamelCase__ ): for col in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1) SCREAMING_SNAKE_CASE__ : Dict =y_val SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ ) def interpolated_func(UpperCamelCase__ : int ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(UpperCamelCase__ ) ) return interpolated_func def _a( UpperCamelCase__ : int ): '''simple docstring''' return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )] SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[ interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 ) ] SCREAMING_SNAKE_CASE__ : int =0 SCREAMING_SNAKE_CASE__ : Callable[[int], int] SCREAMING_SNAKE_CASE__ : int for poly in polynomials: SCREAMING_SNAKE_CASE__ : Any =1 while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ): x_val += 1 ret += poly(UpperCamelCase__ ) 
return ret if __name__ == "__main__": print(F'''{solution() = }''')
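# A small worked check of the Gaussian-elimination helper above: the system
#   2x + y = 5
#    x - y = 1
# has the unique solution x = 2, y = 1, which solve() returns column-wise.
assert solve([[2, 1], [1, -1]], [[5], [1]]) == [[2.0], [1.0]]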
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available a_ = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = 42 @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' snake_case_ = 42 snake_case_ = 42 snake_case_ = None snake_case_ = None class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): '''simple docstring''' snake_case_ = """train""" snake_case_ = """dev""" snake_case_ = """test""" class __SCREAMING_SNAKE_CASE : '''simple docstring''' @staticmethod def __magic_name__ ( __lowercase : Union[str, Any] , __lowercase : Union[Split, str] ) -> List[InputExample]: raise NotImplementedError @staticmethod def __magic_name__ ( __lowercase : str ) -> List[str]: raise NotImplementedError @staticmethod def __magic_name__ ( __lowercase : List[InputExample] , __lowercase : List[str] , __lowercase : int , __lowercase : PreTrainedTokenizer , __lowercase : Union[str, Any]=False , __lowercase : str="[CLS]" , __lowercase : List[str]=1 , __lowercase : Any="[SEP]" , __lowercase : Optional[Any]=False , __lowercase : Optional[Any]=False , __lowercase : Tuple=0 , __lowercase : Optional[int]=0 , __lowercase : List[str]=-1_00 , __lowercase : List[Any]=0 , __lowercase : List[Any]=True , ) -> List[InputFeatures]: SCREAMING_SNAKE_CASE__ : Any ={label: i for i, label in enumerate(__lowercase )} SCREAMING_SNAKE_CASE__ : Any =[] for ex_index, example in enumerate(__lowercase ): if ex_index % 1_00_00 == 0: logger.info('''Writing example %d of %d''' , __lowercase , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] =[] SCREAMING_SNAKE_CASE__ : str =[] for word, label in zip(example.words , example.labels ): SCREAMING_SNAKE_CASE__ : int =tokenizer.tokenize(__lowercase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(__lowercase ) > 0: tokens.extend(__lowercase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowercase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.num_special_tokens_to_add() if len(__lowercase ) > max_seq_length - special_tokens_count: SCREAMING_SNAKE_CASE__ : List[Any] =tokens[: (max_seq_length - special_tokens_count)] SCREAMING_SNAKE_CASE__ : Tuple =label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". 
Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] SCREAMING_SNAKE_CASE__ : List[str] =[sequence_a_segment_id] * len(__lowercase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =[cls_token] + tokens SCREAMING_SNAKE_CASE__ : List[Any] =[pad_token_label_id] + label_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] =[cls_token_segment_id] + segment_ids SCREAMING_SNAKE_CASE__ : int =tokenizer.convert_tokens_to_ids(__lowercase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. SCREAMING_SNAKE_CASE__ : Tuple =[1 if mask_padding_with_zero else 0] * len(__lowercase ) # Zero-pad up to the sequence length. SCREAMING_SNAKE_CASE__ : List[Any] =max_seq_length - len(__lowercase ) if pad_on_left: SCREAMING_SNAKE_CASE__ : Any =([pad_token] * padding_length) + input_ids SCREAMING_SNAKE_CASE__ : Tuple =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids SCREAMING_SNAKE_CASE__ : List[Any] =([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length assert len(__lowercase ) == max_seq_length if ex_index < 5: logger.info('''*** Example ***''' ) logger.info('''guid: %s''' , example.guid ) logger.info('''tokens: %s''' , ''' '''.join([str(__lowercase ) for x in tokens] ) ) logger.info('''input_ids: %s''' , ''' '''.join([str(__lowercase ) for x in input_ids] ) ) logger.info('''input_mask: %s''' , ''' '''.join([str(__lowercase ) for x in input_mask] ) ) logger.info('''segment_ids: %s''' , ''' '''.join([str(__lowercase ) for x in segment_ids] ) ) logger.info('''label_ids: %s''' , ''' '''.join([str(__lowercase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: SCREAMING_SNAKE_CASE__ : Optional[Any] =None features.append( InputFeatures( input_ids=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , label_ids=__lowercase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): '''simple docstring''' snake_case_ = 42 snake_case_ = nn.CrossEntropyLoss().ignore_index def __init__( self : Union[str, Any] , __lowercase : TokenClassificationTask , __lowercase : str , __lowercase : PreTrainedTokenizer , __lowercase : List[str] , __lowercase : str , __lowercase : Optional[int] = None , __lowercase : Union[str, Any]=False , __lowercase : Split = Split.train , ) -> Dict: # Load data features from cache or dataset file SCREAMING_SNAKE_CASE__ : str =os.path.join( __lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(__lowercase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
SCREAMING_SNAKE_CASE__ : List[str] =cached_features_file + '''.lock''' with FileLock(__lowercase ): if os.path.exists(__lowercase ) and not overwrite_cache: logger.info(F"Loading features from cached file {cached_features_file}" ) SCREAMING_SNAKE_CASE__ : Dict =torch.load(__lowercase ) else: logger.info(F"Creating features from dataset file at {data_dir}" ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =token_classification_task.read_examples_from_file(__lowercase , __lowercase ) # TODO clean up all this to leverage built-in features of tokenizers SCREAMING_SNAKE_CASE__ : Tuple =token_classification_task.convert_examples_to_features( __lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F"Saving features into cached file {cached_features_file}" ) torch.save(self.features , __lowercase ) def __len__( self : Optional[Any] ) -> Tuple: return len(self.features ) def __getitem__( self : str , __lowercase : List[str] ) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class __SCREAMING_SNAKE_CASE : '''simple docstring''' snake_case_ = 42 snake_case_ = -100 def __init__( self : Union[str, Any] , __lowercase : TokenClassificationTask , __lowercase : str , __lowercase : PreTrainedTokenizer , __lowercase : List[str] , __lowercase : str , __lowercase : Optional[int] = None , __lowercase : List[str]=False , __lowercase : Split = Split.train , ) -> str: SCREAMING_SNAKE_CASE__ : Any =token_classification_task.read_examples_from_file(__lowercase , __lowercase ) # TODO clean up all this to leverage built-in features of tokenizers SCREAMING_SNAKE_CASE__ : Optional[int] =token_classification_task.convert_examples_to_features( __lowercase , __lowercase , __lowercase , __lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: SCREAMING_SNAKE_CASE__ : Optional[int] =tf.data.Dataset.from_generator( __lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , ( {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] =tf.data.Dataset.from_generator( __lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , ( { '''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] ), '''token_type_ids''': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def 
__magic_name__ ( self : Optional[int] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Any =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : List[str] ) -> Tuple: return len(self.features ) def __getitem__( self : List[Any] , __lowercase : Any ) -> InputFeatures: return self.features[i]
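# Feature-conversion sketch. Assumptions: the anonymized classes above are
# transformers' InputExample (guid/words/labels) and TokenClassificationTask,
# and the converter is its convert_examples_to_features staticmethod. Only the
# first sub-token of each word keeps the real label id; the rest receive
# pad_token_label_id (-100), as the tokenization loop above shows.
from transformers import AutoTokenizer

ner_example = InputExample(guid="train-1", words=["Hugging", "Face", "Inc"], labels=["B-ORG", "I-ORG", "I-ORG"])
bert_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
feats = TokenClassificationTask.convert_examples_to_features([ner_example], ["O", "B-ORG", "I-ORG"], 32, bert_tokenizer, cls_token=bert_tokenizer.cls_token, sep_token=bert_tokenizer.sep_token, pad_token=bert_tokenizer.pad_token_id)
assert len(feats[0].input_ids) == 32  # padded up to max_seq_length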
'''simple docstring''' def _a( UpperCamelCase__ : Optional[int], UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =0 SCREAMING_SNAKE_CASE__ : Union[str, Any] =len(UpperCamelCase__ ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Union[str, Any] =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None SCREAMING_SNAKE_CASE__ : Optional[int] =sorted_collection[point] if current_item == item: return point else: if point < left: SCREAMING_SNAKE_CASE__ : Union[str, Any] =left SCREAMING_SNAKE_CASE__ : Optional[Any] =point elif point > right: SCREAMING_SNAKE_CASE__ : Optional[int] =right SCREAMING_SNAKE_CASE__ : Tuple =point else: if item < current_item: SCREAMING_SNAKE_CASE__ : str =point - 1 else: SCREAMING_SNAKE_CASE__ : Tuple =point + 1 return None def _a( UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None SCREAMING_SNAKE_CASE__ : Dict =left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(UpperCamelCase__ ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) elif point > right: return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 ) else: return interpolation_search_by_recursion( UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ ) def _a( UpperCamelCase__ : Dict ): '''simple docstring''' if collection != sorted(UpperCamelCase__ ): raise ValueError('''Collection must be ascending sorted''' ) return True if __name__ == "__main__": import sys a_ = 0 if debug == 1: a_ = [1_0, 3_0, 4_0, 4_5, 5_0, 6_6, 7_7, 9_3] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') a_ = 6_7 a_ = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print('Not found')
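# Quick checks of the iterative search above (called interpolation_search in the
# __main__ block). The probe index is
#   left + (item - a[left]) * (right - left) // (a[right] - a[left]),
# so uniformly distributed keys are found in very few probes.
sorted_data = [2, 4, 8, 16, 32, 64]
assert interpolation_search(sorted_data, 16) == 3    # probes indices 1, 2, then 3
assert interpolation_search(sorted_data, 5) is None  # absent value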
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a_ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['SpeechEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['FlaxSpeechEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
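# Minimal sketch of the lazy-module pattern used above: attributes are resolved
# on first access instead of at import time, so heavy optional dependencies
# (torch, flax) are only imported when actually needed. Names are illustrative.
import importlib

class LazyAttr:
    def __init__(self, module_name: str, attr: str) -> None:
        self._module_name, self._attr = module_name, attr
        self._obj = None

    def resolve(self):
        if self._obj is None:  # import deferred until first use
            self._obj = getattr(importlib.import_module(self._module_name), self._attr)
        return self._obj

lazy_sqrt = LazyAttr("math", "sqrt")
assert lazy_sqrt.resolve()(9.0) == 3.0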
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def __magic_name__ ( *__lowercase : int , **__lowercase : Optional[Any] ) -> Optional[Any]: pass @is_pipeline_test @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @require_torch def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) SCREAMING_SNAKE_CASE__ : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__lowercase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) SCREAMING_SNAKE_CASE__ : int =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @require_tf def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[Any] =pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) SCREAMING_SNAKE_CASE__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Tuple =image_classifier(__lowercase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__lowercase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, 
{'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], [ {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, {'''score''': 0.333, '''label''': ANY(__lowercase )}, ], ] , ) @slow @require_torch def __magic_name__ ( self : List[Any] ) -> Dict: SCREAMING_SNAKE_CASE__ : Union[str, Any] =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Any =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : List[str] =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__ ( self : Union[str, Any] ) -> int: SCREAMING_SNAKE_CASE__ : str =pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE__ : Optional[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) SCREAMING_SNAKE_CASE__ : Any =image_classifier(__lowercase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__lowercase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) SCREAMING_SNAKE_CASE__ : List[Any] =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__lowercase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
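# Condensed usage of the pipeline exercised by the slow tests above (same model
# and candidate labels; needs torch, Pillow and network access).
from transformers import pipeline

clip_classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = clip_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "plane", "remote"])
print(preds[0]["label"], round(preds[0]["score"], 3))  # "remote", 0.511 per the expectations above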
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/nllb-large-en-ro': 1_0_2_4, 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off a_ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = PRETRAINED_VOCAB_FILES_MAP 
snake_case_ = ["""input_ids""", """attention_mask"""] snake_case_ = NllbTokenizer snake_case_ = [] snake_case_ = [] def __init__( self : Any , __lowercase : int=None , __lowercase : str=None , __lowercase : str="<s>" , __lowercase : Any="</s>" , __lowercase : List[str]="</s>" , __lowercase : Any="<s>" , __lowercase : Optional[int]="<unk>" , __lowercase : List[Any]="<pad>" , __lowercase : Any="<mask>" , __lowercase : Any=None , __lowercase : str=None , __lowercase : Union[str, Any]=None , __lowercase : str=False , **__lowercase : List[str] , ) -> Optional[Any]: # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token SCREAMING_SNAKE_CASE__ : Union[str, Any] =legacy_behaviour super().__init__( vocab_file=__lowercase , tokenizer_file=__lowercase , bos_token=__lowercase , eos_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , unk_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase , additional_special_tokens=__lowercase , legacy_behaviour=__lowercase , **__lowercase , ) SCREAMING_SNAKE_CASE__ : List[str] =vocab_file SCREAMING_SNAKE_CASE__ : str =False if not self.vocab_file else True SCREAMING_SNAKE_CASE__ : Union[str, Any] =FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) SCREAMING_SNAKE_CASE__ : List[Any] ={ lang_code: self.convert_tokens_to_ids(__lowercase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE__ : Optional[int] =src_lang if src_lang is not None else '''eng_Latn''' SCREAMING_SNAKE_CASE__ : Dict =self.convert_tokens_to_ids(self._src_lang ) SCREAMING_SNAKE_CASE__ : List[str] =tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __magic_name__ ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def __magic_name__ ( self : List[Any] , __lowercase : str ) -> None: SCREAMING_SNAKE_CASE__ : List[Any] =new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __magic_name__ ( self : Optional[int] , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __magic_name__ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ) -> List[int]: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.sep_token_id] SCREAMING_SNAKE_CASE__ : List[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __magic_name__ ( self : Any , __lowercase : str , __lowercase : str , __lowercase : Optional[str] , __lowercase : Optional[str] , **__lowercase : Dict ) -> List[Any]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) SCREAMING_SNAKE_CASE__ : Optional[int] =src_lang SCREAMING_SNAKE_CASE__ : Any =self(__lowercase , 
add_special_tokens=__lowercase , return_tensors=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : int =self.convert_tokens_to_ids(__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =tgt_lang_id return inputs def __magic_name__ ( self : int , __lowercase : List[str] , __lowercase : str = "eng_Latn" , __lowercase : Optional[List[str]] = None , __lowercase : str = "fra_Latn" , **__lowercase : Optional[Any] , ) -> BatchEncoding: SCREAMING_SNAKE_CASE__ : Union[str, Any] =src_lang SCREAMING_SNAKE_CASE__ : str =tgt_lang return super().prepare_seqaseq_batch(__lowercase , __lowercase , **__lowercase ) def __magic_name__ ( self : int ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def __magic_name__ ( self : List[str] ) -> Union[str, Any]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __magic_name__ ( self : str , __lowercase : int ) -> None: SCREAMING_SNAKE_CASE__ : List[str] =self.convert_tokens_to_ids(__lowercase ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ : List[Any] =[] SCREAMING_SNAKE_CASE__ : Optional[int] =[self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.cur_lang_code] SCREAMING_SNAKE_CASE__ : Tuple =[self.eos_token_id] SCREAMING_SNAKE_CASE__ : Tuple =self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ : List[Any] =self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE__ : Dict =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __magic_name__ ( self : Optional[Any] , __lowercase : str ) -> None: SCREAMING_SNAKE_CASE__ : List[str] =self.convert_tokens_to_ids(__lowercase ) if self.legacy_behaviour: SCREAMING_SNAKE_CASE__ : Any =[] SCREAMING_SNAKE_CASE__ : Optional[int] =[self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE__ : Union[str, Any] =[self.cur_lang_code] SCREAMING_SNAKE_CASE__ : Optional[Any] =[self.eos_token_id] SCREAMING_SNAKE_CASE__ : List[Any] =self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE__ : Dict =self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE__ : Optional[int] =processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowercase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory." ) return SCREAMING_SNAKE_CASE__ : Tuple =os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ): copyfile(self.vocab_file , __lowercase ) return (out_vocab_file,)
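# Usage sketch mirroring the language handling above (checkpoint name taken from
# the pretrained maps; needs network access). src_lang drives the prefix/suffix
# special tokens set in set_src_lang_special_tokens, and the target language id
# can be forced at generation time.
from transformers import AutoTokenizer

nllb_tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
encoded = nllb_tok("Hello world", return_tensors="pt")
fra_id = nllb_tok.convert_tokens_to_ids("fra_Latn")  # pass as forced_bos_token_id to generate()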
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): snake_case_ = JukeboxTokenizer snake_case_ = { """artist""": """Zac Brown Band""", """genres""": """Country""", """lyrics""": """I met a traveller from an antique land, Who said \"Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away """, } @require_torch def __magic_name__ ( self : Optional[int] ) -> str: import torch SCREAMING_SNAKE_CASE__ : List[str] =JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' ) SCREAMING_SNAKE_CASE__ : str =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : str =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 
51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __magic_name__ ( self : Any ) -> List[str]: import torch SCREAMING_SNAKE_CASE__ : int =JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(**self.metas )['''input_ids'''] # fmt: off SCREAMING_SNAKE_CASE__ : Optional[int] =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
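# Condensed form of the tests above: the tokenizer consumes the metadata keywords
# and returns one id tensor per prior level (three for these checkpoints); needs
# torch and network access.
from transformers import JukeboxTokenizer

jukebox_tok = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
streams = jukebox_tok(artist="Zac Brown Band", genres="Country", lyrics="Look on my Works, ye Mighty, and despair!")["input_ids"]
assert len(streams) == 3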
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ = 1_6 a_ = 3_2 def _a( UpperCamelCase__ : Accelerator, UpperCamelCase__ : int = 1_6, UpperCamelCase__ : str = "bert-base-cased" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any =AutoTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =load_dataset('''glue''', '''mrpc''' ) def tokenize_function(UpperCamelCase__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) SCREAMING_SNAKE_CASE__ : int =tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=UpperCamelCase__, max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset SCREAMING_SNAKE_CASE__ : str =datasets.map( UpperCamelCase__, batched=UpperCamelCase__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=UpperCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library SCREAMING_SNAKE_CASE__ : Tuple =tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(UpperCamelCase__ : List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase__, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''' ) return tokenizer.pad(UpperCamelCase__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. 
SCREAMING_SNAKE_CASE__ : Any =DataLoader( tokenized_datasets['''train'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =DataLoader( tokenized_datasets['''validation'''], shuffle=UpperCamelCase__, collate_fn=UpperCamelCase__, batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader def _a( UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs SCREAMING_SNAKE_CASE__ : Any =config['''lr'''] SCREAMING_SNAKE_CASE__ : int =int(config['''num_epochs'''] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =int(config['''seed'''] ) SCREAMING_SNAKE_CASE__ : str =int(config['''batch_size'''] ) SCREAMING_SNAKE_CASE__ : List[Any] =args.model_name_or_path set_seed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple =get_dataloaders(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) SCREAMING_SNAKE_CASE__ : Any =AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__, return_dict=UpperCamelCase__ ) # Instantiate optimizer SCREAMING_SNAKE_CASE__ : List[Any] =( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) SCREAMING_SNAKE_CASE__ : int =optimizer_cls(params=model.parameters(), lr=UpperCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: SCREAMING_SNAKE_CASE__ : Tuple =accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: SCREAMING_SNAKE_CASE__ : Optional[int] =1 SCREAMING_SNAKE_CASE__ : Any =(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =get_linear_schedule_with_warmup( optimizer=UpperCamelCase__, num_warmup_steps=0, num_training_steps=UpperCamelCase__, ) else: SCREAMING_SNAKE_CASE__ : List[str] =DummyScheduler(UpperCamelCase__, total_num_steps=UpperCamelCase__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
SCREAMING_SNAKE_CASE__ : List[Any] =accelerator.prepare( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) # We need to keep track of how many total steps we have iterated over SCREAMING_SNAKE_CASE__ : int =0 # We also need to keep track of the stating epoch so files are named properly SCREAMING_SNAKE_CASE__ : Optional[Any] =0 # Now we train the model SCREAMING_SNAKE_CASE__ : List[Any] =evaluate.load('''glue''', '''mrpc''' ) SCREAMING_SNAKE_CASE__ : Optional[int] =0 SCREAMING_SNAKE_CASE__ : Dict ={} for epoch in range(UpperCamelCase__, UpperCamelCase__ ): model.train() for step, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =outputs.loss SCREAMING_SNAKE_CASE__ : Any =loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() SCREAMING_SNAKE_CASE__ : Any =0 for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Tuple =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times SCREAMING_SNAKE_CASE__ : str =accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase__ ) - 1: SCREAMING_SNAKE_CASE__ : Any =predictions[: len(eval_dataloader.dataset ) - samples_seen] SCREAMING_SNAKE_CASE__ : Optional[Any] =references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase__, references=UpperCamelCase__, ) SCREAMING_SNAKE_CASE__ : List[str] =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: SCREAMING_SNAKE_CASE__ : Dict =eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, '''all_results.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=UpperCamelCase__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=UpperCamelCase__, ) parser.add_argument( '''--output_dir''', type=UpperCamelCase__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--performance_lower_bound''', type=UpperCamelCase__, default=UpperCamelCase__, help='''Optional lower bound for the performance metric. 
If set, the training will throw error when the performance metric drops below this value.''', ) parser.add_argument( '''--num_epochs''', type=UpperCamelCase__, default=3, help='''Number of train epochs.''', ) SCREAMING_SNAKE_CASE__ : Any =parser.parse_args() SCREAMING_SNAKE_CASE__ : str ={'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6} training_function(UpperCamelCase__, UpperCamelCase__ ) if __name__ == "__main__": main()
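# Launch sketch (config file name and paths are hypothetical; flag names match
# the argparse definitions above). Under `accelerate launch` with a DeepSpeed
# config, the DummyOptim/DummyScheduler branches above take effect:
#
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out
#
# Equivalent programmatic call, matching how the argparse entry point wires it
# up (heavyweight: downloads GLUE/MRPC and the model weights):
import argparse

demo_args = argparse.Namespace(model_name_or_path="bert-base-cased", output_dir="./out", performance_lower_bound=None, num_epochs=1)
training_function({"lr": 2e-5, "num_epochs": demo_args.num_epochs, "seed": 42, "batch_size": 16}, demo_args)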
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): snake_case_ = """gpt_neox""" def __init__( self : List[Any] , __lowercase : Union[str, Any]=5_04_32 , __lowercase : int=61_44 , __lowercase : Tuple=44 , __lowercase : List[str]=64 , __lowercase : str=2_45_76 , __lowercase : Dict="gelu" , __lowercase : Tuple=0.25 , __lowercase : Tuple=1_00_00 , __lowercase : Tuple=0.0 , __lowercase : str=0.0 , __lowercase : List[Any]=0.1 , __lowercase : Dict=20_48 , __lowercase : Any=0.02 , __lowercase : Dict=1e-5 , __lowercase : List[Any]=True , __lowercase : str=0 , __lowercase : Optional[Any]=2 , __lowercase : Tuple=False , __lowercase : List[Any]=True , __lowercase : Optional[Any]=None , **__lowercase : Any , ) -> Dict: super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] =max_position_embeddings SCREAMING_SNAKE_CASE__ : Any =hidden_size SCREAMING_SNAKE_CASE__ : str =num_hidden_layers SCREAMING_SNAKE_CASE__ : Any =num_attention_heads SCREAMING_SNAKE_CASE__ : Union[str, Any] =intermediate_size SCREAMING_SNAKE_CASE__ : Dict =hidden_act SCREAMING_SNAKE_CASE__ : str =rotary_pct SCREAMING_SNAKE_CASE__ : Optional[Any] =rotary_emb_base SCREAMING_SNAKE_CASE__ : List[Any] =attention_dropout SCREAMING_SNAKE_CASE__ : List[Any] =hidden_dropout SCREAMING_SNAKE_CASE__ : str =classifier_dropout SCREAMING_SNAKE_CASE__ : Any =initializer_range SCREAMING_SNAKE_CASE__ : Dict =layer_norm_eps SCREAMING_SNAKE_CASE__ : Any =use_cache SCREAMING_SNAKE_CASE__ : Tuple =tie_word_embeddings SCREAMING_SNAKE_CASE__ : Tuple =use_parallel_residual SCREAMING_SNAKE_CASE__ : Union[str, Any] =rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowercase ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' F"got {self.rope_scaling}" ) SCREAMING_SNAKE_CASE__ : int =self.rope_scaling.get('''type''' , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =self.rope_scaling.get('''factor''' , __lowercase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" ) if rope_scaling_factor is None or not isinstance(__lowercase , __lowercase ) or rope_scaling_factor <= 1.0: raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
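# Validation sketch for the rope_scaling checks above (assumption: the class is
# transformers' GPTNeoXConfig; values are illustrative). A {"type", "factor"}
# dict with type in {"linear", "dynamic"} and a float factor > 1 passes;
# anything else raises ValueError.
from transformers import GPTNeoXConfig

GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # the type field must be one of ['linear', 'dynamic']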
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a_ = logging.getLogger(__name__) def _a( UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : List[Any] ): '''simple docstring''' return (preds == labels).mean() @dataclass class __SCREAMING_SNAKE_CASE : snake_case_ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) snake_case_ = field( default=lowerCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) snake_case_ = field( default=lowerCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) snake_case_ = field( default=lowerCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) @dataclass class __SCREAMING_SNAKE_CASE : snake_case_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} ) snake_case_ = field(metadata={"""help""": """Should contain the data files for the task."""} ) snake_case_ = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) snake_case_ = field( default=lowerCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) SCREAMING_SNAKE_CASE__ : List[str] =parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. 
Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''', UpperCamelCase__ ) # Set seed set_seed(training_args.seed ) try: SCREAMING_SNAKE_CASE__ : List[Any] =processors[data_args.task_name]() SCREAMING_SNAKE_CASE__ : int =processor.get_labels() SCREAMING_SNAKE_CASE__ : Tuple =len(UpperCamelCase__ ) except KeyError: raise ValueError('''Task not found: %s''' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. SCREAMING_SNAKE_CASE__ : Any =AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=UpperCamelCase__, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) SCREAMING_SNAKE_CASE__ : Tuple =AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=UpperCamelCase__, cache_dir=model_args.cache_dir, ) # Get datasets SCREAMING_SNAKE_CASE__ : Tuple =( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=UpperCamelCase__, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=UpperCamelCase__, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[int] =np.argmax(p.predictions, axis=1 ) return {"acc": simple_accuracy(UpperCamelCase__, p.label_ids )} # Data collator SCREAMING_SNAKE_CASE__ : Optional[Any] =DataCollatorWithPadding(UpperCamelCase__, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer SCREAMING_SNAKE_CASE__ : Dict =Trainer( model=UpperCamelCase__, args=UpperCamelCase__, train_dataset=UpperCamelCase__, eval_dataset=UpperCamelCase__, compute_metrics=UpperCamelCase__, data_collator=UpperCamelCase__, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): 
tokenizer.save_pretrained(training_args.output_dir ) # Evaluation SCREAMING_SNAKE_CASE__ : Dict ={} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) SCREAMING_SNAKE_CASE__ : List[str] =trainer.evaluate() SCREAMING_SNAKE_CASE__ : str =os.path.join(training_args.output_dir, '''eval_results.txt''' ) if trainer.is_world_master(): with open(UpperCamelCase__, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''', UpperCamelCase__, UpperCamelCase__ ) writer.write('''%s = %s\n''' % (key, value) ) results.update(UpperCamelCase__ ) return results def _a( UpperCamelCase__ : Optional[int] ): '''simple docstring''' main() if __name__ == "__main__": main()
712
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( lowerCamelCase ): def __magic_name__ ( self : Tuple ) -> List[str]: SCREAMING_SNAKE_CASE__ : Dict =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowercase , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__lowercase , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__lowercase , '''num_attention_heads''' ) ) class __SCREAMING_SNAKE_CASE : def __init__( self : int , __lowercase : List[Any] , __lowercase : Optional[Any]=13 , __lowercase : Any=32 , __lowercase : Union[str, Any]=2 , __lowercase : Any=3 , __lowercase : Tuple=6_40 , __lowercase : Optional[Any]=4 , __lowercase : Optional[int]="silu" , __lowercase : List[str]=3 , __lowercase : int=32 , __lowercase : Tuple=0.1 , __lowercase : List[Any]=0.1 , __lowercase : Union[str, Any]=0.1 , __lowercase : Optional[Any]=0.02 , __lowercase : int=True , __lowercase : Optional[Any]=True , __lowercase : Optional[int]=10 , __lowercase : Optional[Any]=None , ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : str =parent SCREAMING_SNAKE_CASE__ : List[Any] =batch_size SCREAMING_SNAKE_CASE__ : Optional[Any] =image_size SCREAMING_SNAKE_CASE__ : List[Any] =patch_size SCREAMING_SNAKE_CASE__ : Tuple =num_channels SCREAMING_SNAKE_CASE__ : Tuple =last_hidden_size SCREAMING_SNAKE_CASE__ : Tuple =num_attention_heads SCREAMING_SNAKE_CASE__ : List[Any] =hidden_act SCREAMING_SNAKE_CASE__ : Tuple =conv_kernel_size SCREAMING_SNAKE_CASE__ : Any =output_stride SCREAMING_SNAKE_CASE__ : str =hidden_dropout_prob SCREAMING_SNAKE_CASE__ : List[str] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Any =classifier_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] =use_labels SCREAMING_SNAKE_CASE__ : Union[str, Any] =is_training SCREAMING_SNAKE_CASE__ : List[str] =num_labels SCREAMING_SNAKE_CASE__ : Tuple =initializer_range SCREAMING_SNAKE_CASE__ : Dict =scope def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any =None SCREAMING_SNAKE_CASE__ : Any =None if self.use_labels: SCREAMING_SNAKE_CASE__ : Optional[int] =ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Optional[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : List[str] =self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__ ( self : int ) -> List[Any]: return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , 
output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__ ( self : Optional[Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ : Dict =MobileViTModel(config=__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : str =model(__lowercase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__ ( self : Any , __lowercase : str , __lowercase : Optional[Any] , __lowercase : List[Any] , __lowercase : str ) -> List[Any]: SCREAMING_SNAKE_CASE__ : Dict =self.num_labels SCREAMING_SNAKE_CASE__ : Tuple =MobileViTForImageClassification(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : List[Any] =model(__lowercase , labels=__lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__ ( self : Dict , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] , __lowercase : str ) -> Any: SCREAMING_SNAKE_CASE__ : List[Any] =self.num_labels SCREAMING_SNAKE_CASE__ : Optional[int] =MobileViTForSemanticSegmentation(__lowercase ) model.to(__lowercase ) model.eval() SCREAMING_SNAKE_CASE__ : Union[str, Any] =model(__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) SCREAMING_SNAKE_CASE__ : str =model(__lowercase , labels=__lowercase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__ ( self : List[Any] ) -> List[Any]: SCREAMING_SNAKE_CASE__ : List[str] =self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ : List[Any] =config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowerCamelCase , lowerCamelCase , unittest.TestCase ): snake_case_ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) snake_case_ = ( { """feature-extraction""": MobileViTModel, """image-classification""": MobileViTForImageClassification, """image-segmentation""": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def __magic_name__ ( self : Tuple ) -> Dict: SCREAMING_SNAKE_CASE__ : Tuple =MobileViTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[str] =MobileViTConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase ) def __magic_name__ ( self : Any ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def __magic_name__ ( self : Union[str, Any] ) -> Any: pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def __magic_name__ ( self : Optional[Any] ) -> Optional[int]: pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: pass def 
__magic_name__ ( self : Any ) -> Any: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : List[Any] =model_class(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : int =[*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] =['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowercase ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __magic_name__ ( self : List[str] ) -> int: pass def __magic_name__ ( self : Dict ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[int] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> List[Any]: def check_hidden_states_output(__lowercase : Optional[Any] , __lowercase : str , __lowercase : str ): SCREAMING_SNAKE_CASE__ : List[str] =model_class(__lowercase ) model.to(__lowercase ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE__ : Dict =model(**self._prepare_for_class(__lowercase , __lowercase ) ) SCREAMING_SNAKE_CASE__ : Any =outputs.hidden_states SCREAMING_SNAKE_CASE__ : Dict =5 self.assertEqual(len(__lowercase ) , __lowercase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. SCREAMING_SNAKE_CASE__ : Tuple =2 for i in range(len(__lowercase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : str =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : Tuple =True check_hidden_states_output(__lowercase , __lowercase , __lowercase ) def __magic_name__ ( self : Any ) -> str: SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowercase ) def __magic_name__ ( self : Optional[Any] ) -> Any: SCREAMING_SNAKE_CASE__ : Any =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowercase ) @slow def __magic_name__ ( self : Optional[int] ) -> Optional[Any]: for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Dict =MobileViTModel.from_pretrained(__lowercase ) self.assertIsNotNone(__lowercase ) def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __magic_name__ ( self : int ) -> Union[str, Any]: return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def __magic_name__ ( self : Dict ) -> Tuple: SCREAMING_SNAKE_CASE__ : int =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__lowercase ) 
SCREAMING_SNAKE_CASE__ : Dict =self.default_image_processor SCREAMING_SNAKE_CASE__ : int =prepare_img() SCREAMING_SNAKE_CASE__ : List[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] =model(**__lowercase ) # verify the logits SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] =torch.tensor([-1.9364, -1.2327, -0.4653] ).to(__lowercase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1e-4 ) ) @slow def __magic_name__ ( self : Optional[int] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Tuple =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : int =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) SCREAMING_SNAKE_CASE__ : int =prepare_img() SCREAMING_SNAKE_CASE__ : Optional[Any] =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[str] =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =outputs.logits # verify the logits SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Any =torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=__lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 ) ) @slow def __magic_name__ ( self : int ) -> Any: SCREAMING_SNAKE_CASE__ : List[Any] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =model.to(__lowercase ) SCREAMING_SNAKE_CASE__ : Any =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =prepare_img() SCREAMING_SNAKE_CASE__ : Any =image_processor(images=__lowercase , return_tensors='''pt''' ).to(__lowercase ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ : List[Any] =model(**__lowercase ) SCREAMING_SNAKE_CASE__ : int =outputs.logits.detach().cpu() SCREAMING_SNAKE_CASE__ : Any =image_processor.post_process_semantic_segmentation(outputs=__lowercase , target_sizes=[(50, 60)] ) SCREAMING_SNAKE_CASE__ : List[Any] =torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =image_processor.post_process_semantic_segmentation(outputs=__lowercase ) SCREAMING_SNAKE_CASE__ : int =torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowercase )
713
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def _a( UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : NDArray[floataa], UpperCamelCase__ : list[int], UpperCamelCase__ : int, ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =coefficient_matrix.shape SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple =constant_matrix.shape if rowsa != colsa: SCREAMING_SNAKE_CASE__ : Any =f"Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}" raise ValueError(UpperCamelCase__ ) if colsa != 1: SCREAMING_SNAKE_CASE__ : str =f"Constant matrix must be nx1 but received {rowsa}x{colsa}" raise ValueError(UpperCamelCase__ ) if rowsa != rowsa: SCREAMING_SNAKE_CASE__ : str =( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' f"received {rowsa}x{colsa} and {rowsa}x{colsa}" ) raise ValueError(UpperCamelCase__ ) if len(UpperCamelCase__ ) != rowsa: SCREAMING_SNAKE_CASE__ : Union[str, Any] =( '''Number of initial values must be equal to number of rows in coefficient ''' f"matrix but received {len(UpperCamelCase__ )} and {rowsa}" ) raise ValueError(UpperCamelCase__ ) if iterations <= 0: raise ValueError('''Iterations must be at least 1''' ) SCREAMING_SNAKE_CASE__ : NDArray[floataa] =np.concatenate( (coefficient_matrix, constant_matrix), axis=1 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any =table.shape strictly_diagonally_dominant(UpperCamelCase__ ) # Iterates the whole matrix for given number of times for _ in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : List[str] =[] for row in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =0 for col in range(UpperCamelCase__ ): if col == row: SCREAMING_SNAKE_CASE__ : int =table[row][col] elif col == cols - 1: SCREAMING_SNAKE_CASE__ : Any =table[row][col] else: temp += (-1) * table[row][col] * init_val[col] SCREAMING_SNAKE_CASE__ : int =(temp + val) / denom new_val.append(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =new_val return [float(UpperCamelCase__ ) for i in new_val] def _a( UpperCamelCase__ : NDArray[floataa] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] =table.shape SCREAMING_SNAKE_CASE__ : Any =True for i in range(0, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : int =0 for j in range(0, cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
665
0
'''simple docstring''' import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowerCamelCase , unittest.TestCase ): snake_case_ = GPTaTokenizer snake_case_ = GPTaTokenizerFast snake_case_ = True snake_case_ = {"""add_prefix_space""": True} snake_case_ = False def __magic_name__ ( self : Any ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt SCREAMING_SNAKE_CASE__ : List[Any] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] SCREAMING_SNAKE_CASE__ : Any =dict(zip(__lowercase , range(len(__lowercase ) ) ) ) SCREAMING_SNAKE_CASE__ : Any =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] SCREAMING_SNAKE_CASE__ : Tuple ={'''unk_token''': '''<unk>'''} SCREAMING_SNAKE_CASE__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) SCREAMING_SNAKE_CASE__ : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def __magic_name__ ( self : Any , **__lowercase : Optional[Any] ) -> str: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def __magic_name__ ( self : int , **__lowercase : Optional[Any] ) -> Tuple: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def __magic_name__ ( self : List[str] , __lowercase : Tuple ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Any ='''lower newer''' SCREAMING_SNAKE_CASE__ : List[str] ='''lower newer''' return input_text, output_text def __magic_name__ ( self : List[str] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : Union[str, Any] =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ : Tuple ='''lower newer''' SCREAMING_SNAKE_CASE__ : List[str] =['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) SCREAMING_SNAKE_CASE__ : Dict =tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Optional[Any] =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def __magic_name__ ( self : Tuple ) -> Union[str, Any]: if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : List[str] =self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple =self.get_rust_tokenizer(add_prefix_space=__lowercase ) SCREAMING_SNAKE_CASE__ : int ='''lower newer''' # Testing tokenization SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer.tokenize(__lowercase , add_prefix_space=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =rust_tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # 
Testing conversion to ids without special tokens SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] =rust_tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing conversion to ids with special tokens SCREAMING_SNAKE_CASE__ : Optional[int] =self.get_rust_tokenizer(add_prefix_space=__lowercase ) SCREAMING_SNAKE_CASE__ : Tuple =tokenizer.encode(__lowercase , add_prefix_space=__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =rust_tokenizer.encode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) # Testing the unknown token SCREAMING_SNAKE_CASE__ : Dict =tokens + [rust_tokenizer.unk_token] SCREAMING_SNAKE_CASE__ : Union[str, Any] =[14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def __magic_name__ ( self : Union[str, Any] , *__lowercase : Tuple , **__lowercase : Dict ) -> Tuple: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __magic_name__ ( self : Optional[int] , __lowercase : List[str]=15 ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase ) # Simple input SCREAMING_SNAKE_CASE__ : Dict ='''This is a simple input''' SCREAMING_SNAKE_CASE__ : List[Any] =['''This is a simple input 1''', '''This is a simple input 2'''] SCREAMING_SNAKE_CASE__ : Optional[Any] =('''This is a simple input''', '''This is a pair''') SCREAMING_SNAKE_CASE__ : List[str] =[ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Simple input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Simple input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Pair input self.assertRaises(__lowercase , tokenizer_r.encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' ) # Pair input self.assertRaises( __lowercase , tokenizer_r.batch_encode_plus , __lowercase , max_length=__lowercase , padding='''max_length''' , ) def __magic_name__ ( self : Union[str, Any] ) -> Optional[int]: SCREAMING_SNAKE_CASE__ : List[str] =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input SCREAMING_SNAKE_CASE__ : Optional[int] ='''This is a simple input''' SCREAMING_SNAKE_CASE__ : str =['''This is a simple input looooooooong''', '''This is a simple input'''] SCREAMING_SNAKE_CASE__ : List[Any] =('''This is a simple input''', '''This is a pair''') SCREAMING_SNAKE_CASE__ : Optional[int] =[ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] SCREAMING_SNAKE_CASE__ : Dict =tokenizer.pad_token_id SCREAMING_SNAKE_CASE__ : 
Union[str, Any] =tokenizer(__lowercase , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer(*__lowercase , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) SCREAMING_SNAKE_CASE__ : Any =tokenizer(__lowercase , padding=__lowercase , truncate=__lowercase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def __magic_name__ ( self : Union[str, Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ : Dict ='''$$$''' SCREAMING_SNAKE_CASE__ : List[Any] =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowercase , add_bos_token=__lowercase ) SCREAMING_SNAKE_CASE__ : int ='''This is a simple input''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''This is a simple input 1''', '''This is a simple input 2'''] SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.bos_token_id SCREAMING_SNAKE_CASE__ : Union[str, Any] =tokenizer(__lowercase ) SCREAMING_SNAKE_CASE__ : Dict =tokenizer(__lowercase ) self.assertEqual(out_s.input_ids[0] , __lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) SCREAMING_SNAKE_CASE__ : Any =tokenizer.decode(out_s.input_ids ) SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __magic_name__ ( self : Tuple ) -> List[Any]: pass def __magic_name__ ( self : Any ) -> int: # TODO: change to self.get_tokenizers() when the fast version is implemented SCREAMING_SNAKE_CASE__ : Dict =[self.get_tokenizer(do_lower_case=__lowercase , add_bos_token=__lowercase )] for tokenizer in tokenizers: with self.subTest(F"{tokenizer.__class__.__name__}" ): SCREAMING_SNAKE_CASE__ : List[str] ='''Encode this.''' SCREAMING_SNAKE_CASE__ : List[Any] ='''This one too please.''' SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) encoded_sequence += tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] =tokenizer.encode_plus( __lowercase , __lowercase , add_special_tokens=__lowercase , return_special_tokens_mask=__lowercase , ) SCREAMING_SNAKE_CASE__ : Any 
=encoded_sequence_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ : Tuple =encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(__lowercase ) , len(__lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] =[ (x if not special_tokens_mask[i] else None) for i, x in enumerate(__lowercase ) ] SCREAMING_SNAKE_CASE__ : int =[x for x in filtered_sequence if x is not None] self.assertEqual(__lowercase , __lowercase ) @require_tokenizers class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __magic_name__ ( self : List[Any] ) -> Tuple: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 SCREAMING_SNAKE_CASE__ : List[str] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowercase ) SCREAMING_SNAKE_CASE__ : List[str] ='''A photo of a cat''' SCREAMING_SNAKE_CASE__ : Dict =tokenizer.encode( __lowercase , ) self.assertEqual(__lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''test_opt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''./test_opt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =tokenizer.encode( __lowercase , ) self.assertEqual(__lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) def __magic_name__ ( self : Dict ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : List[str] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=__lowercase ) SCREAMING_SNAKE_CASE__ : int ='''A photo of a cat''' SCREAMING_SNAKE_CASE__ : Any =tokenizer.encode( __lowercase , ) # Same as above self.assertEqual(__lowercase , [2, 2_50, 13_45, 9, 10, 47_58] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def __magic_name__ ( self : List[str] ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[Any] =AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=__lowercase ) SCREAMING_SNAKE_CASE__ : str ='''bos''' SCREAMING_SNAKE_CASE__ : Optional[Any] =tokenizer.get_vocab()['''bos'''] SCREAMING_SNAKE_CASE__ : Any ='''A photo of a cat''' SCREAMING_SNAKE_CASE__ : Any =tokenizer.encode( __lowercase , ) # We changed the bos token self.assertEqual(__lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] ) tokenizer.save_pretrained('''./tok''' ) SCREAMING_SNAKE_CASE__ : List[Any] =AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) SCREAMING_SNAKE_CASE__ : str =tokenizer.encode( __lowercase , ) self.assertEqual(__lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
714
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def _a( UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' with open(UpperCamelCase__ ) as metadata_file: SCREAMING_SNAKE_CASE__ : Optional[int] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =LukeConfig(use_entity_aware_attention=UpperCamelCase__, **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path SCREAMING_SNAKE_CASE__ : Union[str, Any] =torch.load(UpperCamelCase__, map_location='''cpu''' )['''module'''] # Load the entity vocab file SCREAMING_SNAKE_CASE__ : List[str] =load_original_entity_vocab(UpperCamelCase__ ) # add an entry for [MASK2] SCREAMING_SNAKE_CASE__ : Optional[int] =max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 SCREAMING_SNAKE_CASE__ : Optional[int] =XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks SCREAMING_SNAKE_CASE__ : List[Any] =AddedToken('''<ent>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Any =AddedToken('''<ent2>''', lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"Saving tokenizer to {pytorch_dump_folder_path}" ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''r''' ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] =json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : int ='''MLukeTokenizer''' with open(os.path.join(UpperCamelCase__, '''tokenizer_config.json''' ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__, MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f: json.dump(UpperCamelCase__, UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens SCREAMING_SNAKE_CASE__ : str =tokenizer.convert_tokens_to_ids(['''@'''] )[0] SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer.convert_tokens_to_ids(['''#'''] )[0] SCREAMING_SNAKE_CASE__ : Dict =state_dict['''embeddings.word_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : List[str] =word_emb[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Tuple =word_emb[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[bias_name] SCREAMING_SNAKE_CASE__ : List[Any] =decoder_bias[ent_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : str =decoder_bias[enta_init_index].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] =torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: SCREAMING_SNAKE_CASE__ : Tuple =f"encoder.layer.{layer_index}.attention.self." 
SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict[prefix + matrix_name] SCREAMING_SNAKE_CASE__ : List[Any] =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks SCREAMING_SNAKE_CASE__ : Any =state_dict['''entity_embeddings.entity_embeddings.weight'''] SCREAMING_SNAKE_CASE__ : Any =entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' SCREAMING_SNAKE_CASE__ : Optional[int] =state_dict['''entity_predictions.bias'''] SCREAMING_SNAKE_CASE__ : Tuple =entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : Any =torch.cat([entity_prediction_bias, entity_mask_bias] ) SCREAMING_SNAKE_CASE__ : int =LukeForMaskedLM(config=UpperCamelCase__ ).eval() state_dict.pop('''entity_predictions.decoder.weight''' ) state_dict.pop('''lm_head.decoder.weight''' ) state_dict.pop('''lm_head.decoder.bias''' ) SCREAMING_SNAKE_CASE__ : Tuple =OrderedDict() for key, value in state_dict.items(): if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )): SCREAMING_SNAKE_CASE__ : Optional[Any] =state_dict[key] else: SCREAMING_SNAKE_CASE__ : Any =state_dict[key] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] =model.load_state_dict(UpperCamelCase__, strict=UpperCamelCase__ ) if set(UpperCamelCase__ ) != {"luke.embeddings.position_ids"}: raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" ) if set(UpperCamelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(f"Unexpected missing_keys: {missing_keys}" ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs SCREAMING_SNAKE_CASE__ : Any =MLukeTokenizer.from_pretrained(UpperCamelCase__, task='''entity_classification''' ) SCREAMING_SNAKE_CASE__ : str ='''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).''' SCREAMING_SNAKE_CASE__ : Union[str, Any] =(0, 9) SCREAMING_SNAKE_CASE__ : str =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[Any] =model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : str =torch.Size((1, 3_3, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base SCREAMING_SNAKE_CASE__ : Any =torch.Size((1, 1, 7_6_8) ) SCREAMING_SNAKE_CASE__ : int =torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is" f" 
{expected_shape}" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCamelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction SCREAMING_SNAKE_CASE__ : str =MLukeTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''Tokyo is the capital of <mask>.''' SCREAMING_SNAKE_CASE__ : Dict =(2_4, 3_0) SCREAMING_SNAKE_CASE__ : Optional[int] =tokenizer(UpperCamelCase__, entity_spans=[span], return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ : List[str] =model(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : List[Any] =encoding['''input_ids'''][0].tolist() SCREAMING_SNAKE_CASE__ : Any =input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) ) SCREAMING_SNAKE_CASE__ : List[Any] =outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] =outputs.entity_logits[0][0].argmax().item() SCREAMING_SNAKE_CASE__ : Dict =[ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def _a( UpperCamelCase__ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] =['''[MASK]''', '''[PAD]''', '''[UNK]'''] SCREAMING_SNAKE_CASE__ : List[str] =[json.loads(UpperCamelCase__ ) for line in open(UpperCamelCase__ )] SCREAMING_SNAKE_CASE__ : Optional[int] ={} for entry in data: SCREAMING_SNAKE_CASE__ : Tuple =entry['''id'''] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: SCREAMING_SNAKE_CASE__ : str =entity_id break SCREAMING_SNAKE_CASE__ : Union[str, Any] =f"{language}:{entity_name}" SCREAMING_SNAKE_CASE__ : Union[str, Any] =entity_id return new_mapping if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) a_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
665
0
'''simple docstring''' def _a( UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =generate_pascal_triangle(UpperCamelCase__ ) for row_idx in range(UpperCamelCase__ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=''' ''' ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx], end=''' ''' ) else: print(triangle[row_idx][col_idx], end='''''' ) print() def _a( UpperCamelCase__ : int ): '''simple docstring''' if not isinstance(UpperCamelCase__, UpperCamelCase__ ): raise TypeError('''The input value of \'num_rows\' should be \'int\'''' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( '''The input value of \'num_rows\' should be greater than or equal to 0''' ) SCREAMING_SNAKE_CASE__ : list[list[int]] =[] for current_row_idx in range(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Optional[Any] =populate_current_row(UpperCamelCase__, UpperCamelCase__ ) triangle.append(UpperCamelCase__ ) return triangle def _a( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] =[-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 SCREAMING_SNAKE_CASE__ : List[str] =1, 1 for current_col_idx in range(1, UpperCamelCase__ ): calculate_current_element( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) return current_row def _a( UpperCamelCase__ : list[list[int]], UpperCamelCase__ : list[int], UpperCamelCase__ : int, UpperCamelCase__ : int, ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict =triangle[current_row_idx - 1][current_col_idx - 1] SCREAMING_SNAKE_CASE__ : str =triangle[current_row_idx - 1][current_col_idx] SCREAMING_SNAKE_CASE__ : str =above_to_left_elt + above_to_right_elt def _a( UpperCamelCase__ : int ): '''simple docstring''' if not isinstance(UpperCamelCase__, UpperCamelCase__ ): raise TypeError('''The input value of \'num_rows\' should be \'int\'''' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( '''The input value of \'num_rows\' should be greater than or equal to 0''' ) SCREAMING_SNAKE_CASE__ : list[list[int]] =[[1]] for row_index in range(1, UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ : Tuple =[0] + result[-1] + [0] SCREAMING_SNAKE_CASE__ : List[Any] =row_index + 1 # Calculate the number of distinct elements in a row SCREAMING_SNAKE_CASE__ : int =sum(divmod(UpperCamelCase__, 2 ) ) SCREAMING_SNAKE_CASE__ : str =[ temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1 ) ] SCREAMING_SNAKE_CASE__ : int =row_first_half[: (row_index + 1) // 2] row_second_half.reverse() SCREAMING_SNAKE_CASE__ : Optional[int] =row_first_half + row_second_half result.append(UpperCamelCase__ ) return result def _a( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase__ : Callable, UpperCamelCase__ : int ) -> None: SCREAMING_SNAKE_CASE__ : List[Any] =f"{func.__name__}({value})" SCREAMING_SNAKE_CASE__ : Optional[int] =timeit(f"__main__.{call}", setup='''import __main__''' ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(f"{call:38} -- {timing:.4f} seconds" ) for value in range(1_5 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(UpperCamelCase__, UpperCamelCase__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
715
'''simple docstring''' def _a( UpperCamelCase__ : str, UpperCamelCase__ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Optional[Any] =len(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ : Tuple =[[False for _ in range(m + 1 )] for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : List[Any] =True for i in range(UpperCamelCase__ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: SCREAMING_SNAKE_CASE__ : Optional[int] =True if a[i].islower(): SCREAMING_SNAKE_CASE__ : List[Any] =True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
665
0