from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure"
                " to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull Request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly, as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull Request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # Half the attention head count is usually a good trade-off between
            # speed and memory.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # Setting the slice size to `None` restores full attention computation.
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to obtain the mask.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run the inpainting pipeline with the generated mask.
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
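A minimal usage sketch for the pipeline above, assuming it ships as a diffusers community pipeline named "text_inpainting"; the CLIPSeg and inpainting checkpoint names are illustrative assumptions, not part of the original file.

import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

# Hypothetical checkpoints; swap in whichever CLIPSeg/inpainting weights you use.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
    torch_dtype=torch.float16,
).to("cuda")

image = Image.open("photo.png").convert("RGB").resize((512, 512))
# CLIPSeg segments the region described by `text`; the prompt fills it in.
result = pipe(prompt="a modern leather sofa", image=image, text="the couch")
result.images[0].save("inpainted.png")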
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)

logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
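A hedged sketch of one way to invoke the script above; the checkpoint name, data directory, and sample count are placeholder assumptions, and only flags declared by the dataclasses above or by the standard Seq2SeqTrainingArguments are passed.

import subprocess
import sys

# Hypothetical invocation; adjust the paths and checkpoint to your setup.
subprocess.run(
    [
        sys.executable, "finetune_trainer.py",
        "--model_name_or_path", "sshleifer/distilbart-cnn-12-6",
        "--data_dir", "./cnn_dm",       # expects train/val/test source and target files
        "--output_dir", "./output",
        "--do_train",
        "--do_eval",
        "--task", "summarization",
        "--n_train", "1000",            # subsample for a quick smoke test
        "--predict_with_generate",
    ],
    check=True,
)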
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        # Newer NLTK releases require pre-tokenized inputs.
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation used to normalize CLIP image
    embeddings: `scale` whitens embeddings and `unscale` inverts it.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
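A small sketch of the normalizer above in use: scale() and unscale() are exact inverses, so a batch of embeddings round-trips (the batch size here is arbitrary).

import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
embeds = torch.randn(4, 768)
scaled = normalizer.scale(embeds)        # (embeds - mean) / std
restored = normalizer.unscale(scaled)    # scaled * std + mean
assert torch.allclose(restored, embeds, atol=1e-5)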
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of n, found by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """True if all elements of the list are equal (vacuously true when empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have exactly n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end, so equality() checks
        # that every count equals n itself.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
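A few sanity checks for the helpers above; the expected values are the well-known Project Euler problem 47 results for runs of length two and three.

assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2^2 * 7 * 23
assert upf_len(644) == 3
assert run(2) == [14, 15]                       # 14 = 2*7, 15 = 3*5
assert solution(3) == 644                       # 644, 645, 646 each have 3 distinct prime factors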
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
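A brief usage sketch for the tokenizer above, using the camembert-base checkpoint already listed in its pretrained map; the sample sentence is illustrative.

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
encoding = tokenizer("J'aime le camembert !")
ids = encoding["input_ids"]  # wrapped in <s> ... </s> by build_inputs_with_special_tokens
tokens = tokenizer.convert_ids_to_tokens(ids)
print(tokens)
print(tokenizer.decode(ids, skip_special_tokens=True))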
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value
        # stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here"
        " will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be"
        " merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
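With the lazy module above in place, public symbols are only materialized on first attribute access; a short sketch of the intended import pattern (the config and processor load without pulling in torch, while the model class is gated by the torch check above).

from transformers import MgpstrConfig, MgpstrProcessor

config = MgpstrConfig()  # lightweight: resolving this does not import the modeling file
# Only available when torch is installed, per the try/except gating above:
from transformers import MgpstrForSceneTextRecognition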
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Keep accepting the historical misspelled kwarg for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
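A short sketch exercising the config above, including the rope_scaling validation path; the parameter values are arbitrary.

config = OpenLlamaConfig()  # library defaults
small = OpenLlamaConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
scaled = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation

try:
    OpenLlamaConfig(rope_scaling={"type": "bogus", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']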
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and the columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
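# A minimal usage sketch, assuming the functions above are in scope; the 4x4 grid is
# hypothetical example data whose rows and columns are sorted in decreasing order.
example_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(example_grid) == 8
assert count_negatives_brute_force(example_grid) == 8
assert count_negatives_brute_force_with_break(example_grid) == 8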
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket; return True if the bucket was empty or held the same key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
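# A minimal usage sketch for the HashMap above; keys and values are hypothetical
# example data. Lookups, deletion, and automatic resizing all go through the
# open-addressing probe sequence in _iterate_buckets.
hashmap = HashMap(initial_block_size=8, capacity_factor=0.75)
hashmap["a"] = 1
hashmap["b"] = 2
assert hashmap["a"] == 1 and len(hashmap) == 2
del hashmap["a"]
assert "a" not in hashmap and len(hashmap) == 1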
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
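# A minimal sketch pairing the reader tokenizer above with a DPR reader model; the
# checkpoint and the question/passage strings are illustrative assumptions.
from transformers import DPRReader

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)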
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
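# A minimal usage sketch for the tool above; the sample dialogue is hypothetical and
# the first call downloads the default checkpoint. PipelineTool instances are callable
# and chain encode -> forward -> decode.
summarizer = TextSummarizationTool()
print(summarizer("Alice: Can we meet tomorrow at 10? Bob: Sure, see you then."))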
def binary_and(a: int, b: int) -> str:
    """Return the binary string of the bitwise AND of two non-negative integers."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
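# A minimal usage sketch; the operands are hypothetical example data. Both inputs are
# zero-filled to a common width, so the digits line up with Python's built-in `&`.
assert binary_and(25, 32) == "0b000000"
assert int(binary_and(37, 50), 2) == 37 & 50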
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import tempfile
import unittest

import numpy as np

import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )
    from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel

if is_torch_available():
    import torch


class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] in place with the deliberately inefficient slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
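# A minimal usage sketch; the list is hypothetical example data. slowsort sorts in
# place and returns None, mirroring list.sort().
data = [9, 3, 7, 1, 5]
slowsort(data)
assert data == [1, 3, 5, 7, 9]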
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : str = { "facebook/nllb-large-en-ro": 1_024, "facebook/nllb-200-distilled-600M": 1_024, } # fmt: off SCREAMING_SNAKE_CASE__ : str = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class lowerCAmelCase__ ( __lowercase ): 
a__ : List[Any] = VOCAB_FILES_NAMES a__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : List[str] = PRETRAINED_VOCAB_FILES_MAP a__ : int = ["""input_ids""", """attention_mask"""] a__ : List[str] = NllbTokenizer a__ : List[int] = [] a__ : List[int] = [] def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : int="</s>" , SCREAMING_SNAKE_CASE__ : List[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Dict="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="<unk>" , SCREAMING_SNAKE_CASE__ : Any="<pad>" , SCREAMING_SNAKE_CASE__ : List[Any]="<mask>" , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : str=False , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[str]: # Mask token behave like a normal word, i.e. include the space before it __lowerCamelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token __lowerCamelCase = legacy_behaviour super().__init__( vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = vocab_file __lowerCamelCase = False if not self.vocab_file else True __lowerCamelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) __lowerCamelCase = { lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __lowerCamelCase = src_lang if src_lang is not None else '''eng_Latn''' __lowerCamelCase = self.convert_tokens_to_ids(self._src_lang ) __lowerCamelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __A ( self : int ) -> str: return self._src_lang @src_lang.setter def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : str ) -> None: __lowerCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) __lowerCamelCase = src_lang __lowerCamelCase = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tgt_lang_id return inputs def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str = "eng_Latn" , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : str = "fra_Latn" , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> BatchEncoding: __lowerCamelCase = src_lang __lowerCamelCase = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> List[str]: return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self : Union[str, Any] ) -> Tuple: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> None: __lowerCamelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + 
self.suffix_tokens ) ) , ) def __A ( self : int , SCREAMING_SNAKE_CASE__ : str ) -> None: __lowerCamelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: __lowerCamelCase = [] __lowerCamelCase = [self.eos_token_id, self.cur_lang_code] else: __lowerCamelCase = [self.cur_lang_code] __lowerCamelCase = [self.eos_token_id] __lowerCamelCase = self.convert_ids_to_tokens(self.prefix_tokens ) __lowerCamelCase = self.convert_ids_to_tokens(self.suffix_tokens ) __lowerCamelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
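# A minimal usage sketch for the fast NLLB tokenizer above, assuming network
# access to the public "facebook/nllb-200-distilled-600M" checkpoint; wrapped
# in a helper so nothing is downloaded at import time (helper name is illustrative).
def _nllb_usage_sketch():
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    # The source text gets the "eng_Latn" code as a prefix token (moved to the
    # suffix under the legacy behaviour); `text_target` tokenizes labels too.
    return tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")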
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , 
float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
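# A self-contained sketch of the adjacent-symbol-pair step that the module-level
# BPE helper above computes (names here are illustrative, not from the file):
def _bpe_pairs_sketch(word: tuple) -> set:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


assert _bpe_pairs_sketch(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}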
339
1
import numpy as np


def __magic_name__ (
    __lowerCAmelCase : np.ndarray ,
    __lowerCAmelCase : np.ndarray ,
    __lowerCAmelCase : float = 1E-12 ,
    __lowerCAmelCase : int = 100 ,
) -> tuple[float, np.ndarray]:
    assert np.shape(__lowerCAmelCase )[0] == np.shape(__lowerCAmelCase )[1]
    # Ensure proper dimensionality.
    assert np.shape(__lowerCAmelCase )[0] == np.shape(__lowerCAmelCase )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(__lowerCAmelCase ) == np.iscomplexobj(__lowerCAmelCase )
    __lowerCamelCase = np.iscomplexobj(__lowerCAmelCase )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(__lowerCAmelCase , input_matrix.conj().T )

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    __lowerCamelCase = False
    __lowerCamelCase = 0
    __lowerCamelCase = 0
    __lowerCamelCase = 1E12

    while not convergence:
        # Multiple matrix by the vector.
        __lowerCamelCase = np.dot(__lowerCAmelCase , __lowerCAmelCase )
        # Normalize the resulting output vector.
        __lowerCamelCase = w / np.linalg.norm(__lowerCAmelCase )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        __lowerCamelCase = vector.conj().T if is_complex else vector.T
        __lowerCamelCase = np.dot(__lowerCAmelCase , np.dot(__lowerCAmelCase , __lowerCAmelCase ) )

        # Check convergence.
        __lowerCamelCase = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            __lowerCamelCase = True

        __lowerCamelCase = lambda_

    if is_complex:
        __lowerCamelCase = np.real(lambda_ )

    return lambda_, vector


def __magic_name__ ( ) -> None:
    __lowerCamelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    __lowerCamelCase = np.array([41, 4, 20] )
    __lowerCamelCase = real_input_matrix.astype(np.complexaaa )
    __lowerCamelCase = np.triu(1J * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    __lowerCamelCase = np.array([41, 4, 20] ).astype(np.complexaaa )

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __lowerCamelCase = real_input_matrix
            __lowerCamelCase = real_vector
        elif problem_type == "complex":
            __lowerCamelCase = complex_input_matrix
            __lowerCamelCase = complex_vector

        # Our implementation.
        __lowerCamelCase , __lowerCamelCase = power_iteration(__lowerCAmelCase , __lowerCAmelCase )

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __lowerCamelCase , __lowerCamelCase = np.linalg.eigh(__lowerCAmelCase )
        # Last eigenvalue is the maximum one.
        __lowerCamelCase = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __lowerCamelCase = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(__lowerCAmelCase ) - np.abs(__lowerCAmelCase ) ) <= 1E-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
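# A tiny self-contained check mirroring the power-iteration routine above:
# the symmetric matrix [[2, 1], [1, 2]] has dominant eigenvalue 3 with
# eigenvector [1, 1] / sqrt(2) (helper name is illustrative).
def _dominant_eigenvalue_sketch(a: np.ndarray, v: np.ndarray, iters: int = 100) -> float:
    for _ in range(iters):
        v = np.dot(a, v)           # multiply matrix by the current vector
        v = v / np.linalg.norm(v)  # re-normalize
    return float(np.dot(v.T, np.dot(a, v)))  # Rayleigh quotient


assert abs(_dominant_eigenvalue_sketch(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0])) - 3.0) < 1e-9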
339
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , 
output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
339
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
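# A quick sanity check of the horizontal-flip bounding-box math used above:
# flipping mirrors the normalized YOLO x-centre, x' = 1 - x, and leaves the
# class id, y-centre, width and height untouched (values are illustrative).
_bbox = [0, 0.25, 0.5, 0.2, 0.4]  # [label, x_centre, y_centre, width, height]
assert [_bbox[0], 1 - _bbox[1], _bbox[2], _bbox[3], _bbox[4]] == [0, 0.75, 0.5, 0.2, 0.4]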
339
1
def __magic_name__ ( __lowerCAmelCase : int = 50 ) -> int:
    __lowerCamelCase = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(F'{solution() = }')
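# A hedged cross-check: the routine above appears to count single-colour tilings
# with one tile length at a time (2, 3 or 4) and at least one tile per row, as in
# Project Euler 116, where a length-5 row admits 7 + 3 + 2 = 12 such layouts.
def _single_colour_ways_sketch(n: int, t: int) -> int:
    f = [0] * (n + 1)
    for i in range(min(t, n + 1)):
        f[i] = 1  # rows shorter than the tile admit only the all-grey layout
    for i in range(t, n + 1):
        f[i] = f[i - 1] + f[i - t]  # end in a grey unit or in one length-t tile
    return f[n] - 1  # drop the all-grey row


assert sum(_single_colour_ways_sketch(5, t) for t in (2, 3, 4)) == 12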
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == 
labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , 
__lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
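# A self-contained sketch of the 4-byte big-endian read the IDX parsers above
# rely on: the MNIST headers store magic numbers and counts as '>u4'
# (helper name is illustrative).
import io
import struct


def _read_u32_be_sketch(stream) -> int:
    return struct.unpack(">I", stream.read(4))[0]


assert _read_u32_be_sketch(io.BytesIO(b"\x00\x00\x08\x03")) == 2051  # image-file magic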
339
1
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)


class lowerCAmelCase__ ( __lowercase ):
    def __init__( self : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ) -> None:
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' ,
            SCREAMING_SNAKE_CASE__ ,
        )
        super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
339
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
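# A minimal usage sketch for the fast tokenizer above, assuming the public
# "squeezebert/squeezebert-uncased" files can be downloaded; wrapped in a
# helper so importing stays cheap (helper name is illustrative).
def _squeezebert_usage_sketch():
    from transformers import SqueezeBertTokenizerFast

    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    enc = tok("hello world", "second segment")
    # Pair layout per the methods above: [CLS] A [SEP] B [SEP], with
    # token_type_ids 0 over the first segment and 1 over the second.
    return enc["input_ids"], enc["token_type_ids"]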
339
1
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=__lowercase )
class lowerCAmelCase__ ( __lowercase ):
    a__ : str = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    a__ : ClassVar[Features] = Features({"""audio""": Audio()} )
    a__ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
    a__ : str = "audio"
    a__ : str = "labels"

    def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , SCREAMING_SNAKE_CASE__ ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        __lowerCamelCase = copy.deepcopy(self )
        __lowerCamelCase = self.label_schema.copy()
        __lowerCamelCase = features[self.label_column]
        __lowerCamelCase = label_schema
        return task_template

    @property
    def __A ( self : Union[str, Any] ) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
339
from __future__ import annotations


def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool:
    return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
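# Worked examples: set() collapses repeats, so comparing lengths detects duplicates.
assert len(set([1, 2, 3])) == len([1, 2, 3])  # all distinct -> equal lengths
assert len(set([1, 2, 2])) != len([1, 2, 2])  # a repeat shrinks the set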
339
1
from __future__ import annotations


def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool:
    return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
339
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


SCREAMING_SNAKE_CASE__ : Dict = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
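# A simplified sketch of the lazy-module pattern used above: attribute access on
# the stand-in module triggers the real submodule import. This is an illustrative
# stand-in under stated assumptions, not the actual `_LazyModule` implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)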
339
1
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class lowerCAmelCase__ ( __lowercase ): a__ : BigBirdConfig a__ : jnp.dtype = jnp.floataa a__ : bool = True def __A ( self : List[Any] ) -> Optional[Any]: super().setup() __lowerCamelCase = nn.Dense(5 , dtype=self.dtype ) def __call__( self : str , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: __lowerCamelCase = super().__call__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class lowerCAmelCase__ ( __lowercase ): a__ : Dict = FlaxBigBirdForNaturalQuestionsModule def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> int: def cross_entropy(__lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str]=None ): __lowerCamelCase = logits.shape[-1] __lowerCamelCase = (labels[..., None] == jnp.arange(__lowerCAmelCase )[None]).astype('''f4''' ) __lowerCamelCase = jax.nn.log_softmax(__lowerCAmelCase , axis=-1 ) __lowerCamelCase = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: __lowerCamelCase = reduction(__lowerCAmelCase ) return loss __lowerCamelCase = partial(__lowerCAmelCase , reduction=jnp.mean ) __lowerCamelCase = cross_entropy(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = cross_entropy(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = cross_entropy(__lowerCAmelCase , __lowerCAmelCase ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class lowerCAmelCase__ : a__ : str = "google/bigbird-roberta-base" a__ : int = 3_000 a__ : int = 10_500 a__ : int = 128 a__ : int = 3 a__ : int = 1 a__ : int = 5 # tx_args a__ : float = 3e-5 a__ : float = 0.0 a__ : int = 20_000 a__ : float = 0.0095 a__ : str = "bigbird-roberta-natural-questions" a__ : str = "training-expt" a__ : str = "data/nq-training.jsonl" a__ : str = "data/nq-validation.jsonl" def __A ( self : Optional[Any] ) -> Optional[int]: os.makedirs(self.base_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = os.path.join(self.base_dir , self.save_dir ) __lowerCamelCase = self.batch_size_per_device * jax.device_count() @dataclass class lowerCAmelCase__ : a__ : int a__ : int = 4_096 # no dynamic padding on TPUs def __call__( self : str , SCREAMING_SNAKE_CASE__ : Any ) -> int: __lowerCamelCase = self.collate_fn(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return batch def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = self.fetch_inputs(features['''input_ids'''] ) __lowerCamelCase = { '''input_ids''': jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.intaa ), '''attention_mask''': jnp.array(SCREAMING_SNAKE_CASE__ , dtype=jnp.intaa ), '''start_labels''': 
jnp.array(features['''start_token'''] , dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : list ) -> str: __lowerCamelCase = [self._fetch_inputs(SCREAMING_SNAKE_CASE__ ) for ids in input_ids] return zip(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : list ) -> Dict: __lowerCamelCase = [1 for _ in range(len(SCREAMING_SNAKE_CASE__ ) )] while len(SCREAMING_SNAKE_CASE__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None ) -> List[str]: if seed is not None: __lowerCamelCase = dataset.shuffle(seed=__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) // batch_size ): __lowerCamelCase = dataset[i * batch_size : (i + 1) * batch_size] yield dict(__lowerCAmelCase ) @partial(jax.pmap , axis_name='''batch''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Tuple ) -> List[Any]: def loss_fn(__lowerCAmelCase : Optional[int] ): __lowerCamelCase = model_inputs.pop('''start_labels''' ) __lowerCamelCase = model_inputs.pop('''end_labels''' ) __lowerCamelCase = model_inputs.pop('''pooled_labels''' ) __lowerCamelCase = state.apply_fn(**__lowerCAmelCase , params=__lowerCAmelCase , dropout_rng=__lowerCAmelCase , train=__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = outputs return state.loss_fn( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) __lowerCamelCase , __lowerCamelCase = jax.random.split(__lowerCAmelCase ) __lowerCamelCase = jax.value_and_grad(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = grad_fn(state.params ) __lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) __lowerCamelCase = jax.lax.pmean(__lowerCAmelCase , '''batch''' ) __lowerCamelCase = state.apply_gradients(grads=__lowerCAmelCase ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def __magic_name__ ( __lowerCAmelCase : int , **__lowerCAmelCase : List[Any] ) -> int: __lowerCamelCase = model_inputs.pop('''start_labels''' ) __lowerCamelCase = model_inputs.pop('''end_labels''' ) __lowerCamelCase = model_inputs.pop('''pooled_labels''' ) __lowerCamelCase = state.apply_fn(**__lowerCAmelCase , params=state.params , train=__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = outputs __lowerCamelCase = state.loss_fn(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class lowerCAmelCase__ ( train_state.TrainState ): a__ : Callable = struct.field(pytree_node=__lowercase ) @dataclass class lowerCAmelCase__ : a__ : Args a__ : Callable a__ : Callable a__ : Callable a__ : Callable a__ : wandb a__ : Callable = None def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=None ) -> int: __lowerCamelCase = model.params __lowerCamelCase = TrainState.create( apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , loss_fn=SCREAMING_SNAKE_CASE__ , ) 
if ckpt_dir is not None: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = restore_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } __lowerCamelCase , __lowerCamelCase = build_tx(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = train_state.TrainState( step=SCREAMING_SNAKE_CASE__ , apply_fn=model.__call__ , params=SCREAMING_SNAKE_CASE__ , tx=SCREAMING_SNAKE_CASE__ , opt_state=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = args __lowerCamelCase = data_collator __lowerCamelCase = lr __lowerCamelCase = params __lowerCamelCase = jax_utils.replicate(SCREAMING_SNAKE_CASE__ ) return state def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: __lowerCamelCase = self.args __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) // args.batch_size __lowerCamelCase = jax.random.PRNGKey(0 ) __lowerCamelCase = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() ) for epoch in range(args.max_epochs ): __lowerCamelCase = jnp.array(0 , dtype=jnp.floataa ) __lowerCamelCase = get_batched_dataset(SCREAMING_SNAKE_CASE__ , args.batch_size , seed=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 0 for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc=f'''Running EPOCH-{epoch}''' ): __lowerCamelCase = self.data_collator(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.train_step_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: __lowerCamelCase = jax_utils.unreplicate(state.step ) __lowerCamelCase = running_loss.item() / i __lowerCamelCase = self.scheduler_fn(state_step - 1 ) __lowerCamelCase = self.evaluate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(SCREAMING_SNAKE_CASE__ ) ) self.logger.log(SCREAMING_SNAKE_CASE__ , commit=SCREAMING_SNAKE_CASE__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]: __lowerCamelCase = get_batched_dataset(SCREAMING_SNAKE_CASE__ , self.args.batch_size ) __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) // self.args.batch_size __lowerCamelCase = jnp.array(0 , dtype=jnp.floataa ) __lowerCamelCase = 0 for batch in tqdm(SCREAMING_SNAKE_CASE__ , total=SCREAMING_SNAKE_CASE__ , desc='''Evaluating ... ''' ): __lowerCamelCase = self.data_collator(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.val_step_fn(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict ) -> str: __lowerCamelCase = jax_utils.unreplicate(SCREAMING_SNAKE_CASE__ ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... 
''' ) self.model_save_fn(SCREAMING_SNAKE_CASE__ , params=state.params ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(SCREAMING_SNAKE_CASE__ , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(SCREAMING_SNAKE_CASE__ , '''data_collator.joblib''' ) ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , SCREAMING_SNAKE_CASE__ ) print('''DONE''' ) def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ) -> int: print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... ''' ) with open(os.path.join(__lowerCAmelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f: __lowerCamelCase = from_bytes(state.params , f.read() ) with open(os.path.join(__lowerCAmelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f: __lowerCamelCase = from_bytes(state.opt_state , f.read() ) __lowerCamelCase = joblib.load(os.path.join(__lowerCAmelCase , '''args.joblib''' ) ) __lowerCamelCase = joblib.load(os.path.join(__lowerCAmelCase , '''data_collator.joblib''' ) ) with open(os.path.join(__lowerCAmelCase , '''training_state.json''' ) , '''r''' ) as f: __lowerCamelCase = json.load(__lowerCAmelCase ) __lowerCamelCase = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> Any: __lowerCamelCase = num_train_steps - warmup_steps __lowerCamelCase = optax.linear_schedule(init_value=__lowerCAmelCase , end_value=__lowerCAmelCase , transition_steps=__lowerCAmelCase ) __lowerCamelCase = optax.linear_schedule(init_value=__lowerCAmelCase , end_value=1E-7 , transition_steps=__lowerCAmelCase ) __lowerCamelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ) -> Optional[int]: def weight_decay_mask(__lowerCAmelCase : int ): __lowerCamelCase = traverse_util.flatten_dict(__lowerCAmelCase ) __lowerCamelCase = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(__lowerCAmelCase ) __lowerCamelCase = scheduler_fn(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = optax.adamw(learning_rate=__lowerCAmelCase , weight_decay=__lowerCAmelCase , mask=__lowerCAmelCase ) return tx, lr
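# A self-contained sketch of one head's term in the training loss above, which
# averages start, end and pooled (category) cross-entropies equally
# (helper name is illustrative; `jax`/`jnp` are imported at the top of the file):
def _head_xent_sketch(logits, labels):
    onehot = (labels[..., None] == jnp.arange(logits.shape[-1])[None]).astype("f4")
    return -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1).mean()
# total_loss = (start_term + end_term + pooled_term) / 3, as in the file above.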
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase )


def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        __lowerCamelCase , __lowerCamelCase = y, x % y
    return abs(__lowerCAmelCase )


def __magic_name__ ( ) -> Tuple:
    try:
        __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        __lowerCamelCase = int(nums[0] )
        __lowerCamelCase = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_a}) = '''
            f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}'''
        )
        print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )


if __name__ == "__main__":
    main()
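# A self-contained copy of the iterative Euclid step used above, with worked
# values (helper name is illustrative):
def _gcd_iter_sketch(x: int, y: int) -> int:
    while y:
        x, y = y, x % y
    return abs(x)


assert _gcd_iter_sketch(24, 40) == 8 and _gcd_iter_sketch(121, 11) == 11 and _gcd_iter_sketch(0, 7) == 7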
339
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ : Optional[int] = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class lowerCAmelCase__ ( __lowercase ):
    a__ : Optional[Any] = """ctrl"""
    a__ : str = ["""past_key_values"""]
    a__ : Any = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__(
        self : List[Any] ,
        SCREAMING_SNAKE_CASE__ : Union[str, Any]=24_65_34 ,
        SCREAMING_SNAKE_CASE__ : Any=2_56 ,
        SCREAMING_SNAKE_CASE__ : Tuple=12_80 ,
        SCREAMING_SNAKE_CASE__ : Tuple=81_92 ,
        SCREAMING_SNAKE_CASE__ : str=48 ,
        SCREAMING_SNAKE_CASE__ : Optional[int]=16 ,
        SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,
        SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,
        SCREAMING_SNAKE_CASE__ : Dict=1e-6 ,
        SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,
        SCREAMING_SNAKE_CASE__ : Any=True ,
        **SCREAMING_SNAKE_CASE__ : str ,
    ) -> List[Any]:
        __lowerCamelCase = vocab_size
        __lowerCamelCase = n_positions
        __lowerCamelCase = n_embd
        __lowerCamelCase = n_layer
        __lowerCamelCase = n_head
        __lowerCamelCase = dff
        __lowerCamelCase = resid_pdrop
        __lowerCamelCase = embd_pdrop
        __lowerCamelCase = layer_norm_epsilon
        __lowerCamelCase = initializer_range
        __lowerCamelCase = use_cache

        super().__init__(**SCREAMING_SNAKE_CASE__ )
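# A hedged usage sketch of the attribute map above: canonical names such as
# `hidden_size` resolve to the CTRL-specific fields (here `n_embd`); wrapped in
# a helper so nothing runs at import time (helper name is illustrative).
def _ctrl_config_sketch():
    from transformers import CTRLConfig

    cfg = CTRLConfig(n_embd=1280, n_layer=48, n_head=16)
    assert cfg.hidden_size == 1280  # resolved through the attribute map
    return cfg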
339
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
    @slow
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )

        __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids

        __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )

        __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
        __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()

        __lowerCamelCase = -(labels.shape[-1] * loss.item())
        __lowerCamelCase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
339
1
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the shortest repunit divisible by ``divisor`` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor (coprime to 10) whose shortest repunit is longer than ``limit``."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
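Two worked checks of the helper above, verifiable by hand: 111111 = 7 x 15873 and 11111 = 41 x 271, while any divisor sharing a factor with 10 can never divide a repunit:

assert least_divisible_repunit(7) == 6    # R(6) = 111111 is the first repunit 7 divides
assert least_divisible_repunit(41) == 5   # R(5) = 11111 = 41 * 271
assert least_divisible_repunit(10) == 0   # shares a factor with 10, so no repunit works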
339
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : 
Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval 
options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? 
Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. 
{}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
1
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
339
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
339
1
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
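The commented-out prints suggest the script once had a dry-run mode; a hedged way to recover it is an environment-variable guard around the mutating calls (STALE_BOT_DRY_RUN and this snippet are an illustration, not part of the original):

DRY_RUN = os.environ.get("STALE_BOT_DRY_RUN", "1") == "1"  # hypothetical opt-out guard

# inside the loop, in place of issue.edit(state="closed"):
if DRY_RUN:
    print(f"Would close issue {issue.number} after 7 days of inactivity since the bot mention.")
else:
    issue.edit(state="closed")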
339
from collections import namedtuple

import requests
from lxml import html  # type: ignore


covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
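Scraping a live page with one bare XPath is brittle, so a hardened variant might add a timeout, an HTTP status check, and a shape check (all three are additions, not in the original):

def covid_stats_safe(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    values = html.fromstring(response.content).xpath('//div[@class = "maincounter-number"]/span/text()')
    if len(values) < 3:
        raise ValueError("page layout changed: expected three maincounter numbers")
    return covid_data(*values[:3])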
339
1
from manim import * class lowerCAmelCase__ ( __lowercase ): def __A ( self : Dict ) -> int: __lowerCamelCase = Rectangle(height=0.5 , width=0.5 ) __lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 ) __lowerCamelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 ) __lowerCamelCase = VGroup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 ) __lowerCamelCase = Text('''CPU''' , font_size=24 ) __lowerCamelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [mem.copy() for i in range(1 )] __lowerCamelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 ) __lowerCamelCase = Text('''GPU''' , font_size=24 ) __lowerCamelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ ) gpu.align_to(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [mem.copy() for i in range(6 )] __lowerCamelCase = VGroup(*SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0 ) __lowerCamelCase = Text('''Model''' , font_size=24 ) __lowerCamelCase = Group(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).arrange(SCREAMING_SNAKE_CASE__ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE__ ) model.move_to([3, -1.0, 0] ) self.play( Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE__ , run_time=1 ) , ) __lowerCamelCase = MarkupText( f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , ) __lowerCamelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowerCamelCase = MarkupText( f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(SCREAMING_SNAKE_CASE__ , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE__ ) , Write(SCREAMING_SNAKE_CASE__ ) ) self.add(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for i, rect in enumerate(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE__ , opacity=0.7 ) cpu_target.move_to(SCREAMING_SNAKE_CASE__ ) cpu_target.generate_target() __lowerCamelCase = 0.46 / 4 __lowerCamelCase = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE__ , buff=0.0 ) cpu_targs.append(SCREAMING_SNAKE_CASE__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE__ ) ) second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE__ , run_time=1.5 ) ) self.play(*SCREAMING_SNAKE_CASE__ ) self.play(*SCREAMING_SNAKE_CASE__ ) 
self.wait()
339
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. 
-1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
339
1
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCAmelCase__ ( __lowercase ): def __init__( self : str ) -> List[str]: __lowerCamelCase = [] def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: self.events.append('''on_init_end''' ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict: self.events.append('''on_train_begin''' ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: self.events.append('''on_train_end''' ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple: self.events.append('''on_epoch_begin''' ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: self.events.append('''on_epoch_end''' ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]: self.events.append('''on_step_begin''' ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ) -> List[str]: self.events.append('''on_step_end''' ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict: self.events.append('''on_evaluate''' ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: self.events.append('''on_predict''' ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : int ) -> Tuple: self.events.append('''on_save''' ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: self.events.append('''on_log''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]: self.events.append('''on_prediction_step''' ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Dict ) -> int: __lowerCamelCase = tempfile.mkdtemp() def __A ( self : Tuple ) -> List[Any]: shutil.rmtree(self.output_dir ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Union[str, 
Any]=64 , SCREAMING_SNAKE_CASE__ : Optional[Any]=64 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : int=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. __lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ ) return Trainer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ) -> Any: self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) # Order doesn't matter __lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ ) __lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ ) for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ ) elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ ) else: self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]: __lowerCamelCase = ['''on_init_end''', '''on_train_begin'''] __lowerCamelCase = 0 __lowerCamelCase = len(trainer.get_eval_dataloader() ) __lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate'''] for _ in range(trainer.state.num_train_epochs ): expected_events.append('''on_epoch_begin''' ) for _ in range(SCREAMING_SNAKE_CASE__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('''on_log''' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('''on_save''' ) expected_events.append('''on_epoch_end''' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def __A ( self : List[str] ) -> str: __lowerCamelCase = self.get_trainer() __lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] 
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) # Callbacks passed at init are added to the default callbacks __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback __lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) def __A ( self : str ) -> Optional[Any]: __lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback] __lowerCamelCase = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(SCREAMING_SNAKE_CASE__ ) expected_callbacks.remove(SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_trainer() __lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ ) self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) trainer.add_callback(SCREAMING_SNAKE_CASE__ ) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) # We can also add, pop, or remove by instance __lowerCamelCase = self.get_trainer() __lowerCamelCase = trainer.callback_handler.callbacks[0] trainer.remove_callback(SCREAMING_SNAKE_CASE__ ) expected_callbacks.remove(SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_trainer() __lowerCamelCase = trainer.callback_handler.callbacks[0] __lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) trainer.add_callback(SCREAMING_SNAKE_CASE__ ) expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] ) -> Optional[int]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() __lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) # Independent log/save/eval __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() __lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() __lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' ) trainer.train() 
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' ) trainer.train() __lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) # A bit of everything __lowerCamelCase = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , ) trainer.train() __lowerCamelCase = trainer.callback_handler.callbacks[-2].events self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) ) # warning should be emitted for duplicated callbacks with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock: __lowerCamelCase = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
339
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
339
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[str] = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> str: for attribute in key.split('''.''' ): __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ) if weight_type is not None: __lowerCamelCase = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape else: __lowerCamelCase = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __lowerCamelCase = value elif weight_type == "weight_g": __lowerCamelCase = value elif weight_type == "weight_v": __lowerCamelCase = value elif weight_type == "bias": __lowerCamelCase = value else: __lowerCamelCase = value logger.info(f'''{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int ) -> List[Any]: __lowerCamelCase = [] __lowerCamelCase = fairseq_model.state_dict() __lowerCamelCase = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCamelCase = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowerCamelCase = True else: for key, mapped_key in MAPPING.items(): __lowerCamelCase = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __lowerCamelCase = True if "*" in mapped_key: __lowerCamelCase = name.split(__lowerCAmelCase )[0].split('''.''' )[-2] __lowerCamelCase = mapped_key.replace('''*''' , __lowerCAmelCase ) if "weight_g" in name: __lowerCamelCase = '''weight_g''' elif "weight_v" in name: __lowerCamelCase = '''weight_v''' elif "weight" in name: __lowerCamelCase = '''weight''' elif "bias" in name: __lowerCamelCase = '''bias''' else: __lowerCamelCase = None set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> int: __lowerCamelCase = full_name.split('''conv_layers.''' )[-1] __lowerCamelCase = name.split('''.''' ) __lowerCamelCase = int(items[0] ) __lowerCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __lowerCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __lowerCamelCase = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> Optional[int]: __lowerCamelCase = SEWConfig() if is_finetuned: __lowerCamelCase = model.wav_encoder.wav_model.cfg else: __lowerCamelCase = model.cfg __lowerCamelCase = fs_config.conv_bias __lowerCamelCase = eval(fs_config.conv_feature_layers ) __lowerCamelCase = [x[0] for x in conv_layers] __lowerCamelCase = [x[1] for x in conv_layers] __lowerCamelCase = [x[2] for x in conv_layers] __lowerCamelCase = '''gelu''' __lowerCamelCase = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group''' __lowerCamelCase = 0.0 __lowerCamelCase = fs_config.activation_fn.name __lowerCamelCase = fs_config.encoder_embed_dim __lowerCamelCase = 0.02 __lowerCamelCase = fs_config.encoder_ffn_embed_dim __lowerCamelCase = 1E-5 __lowerCamelCase = fs_config.encoder_layerdrop __lowerCamelCase = fs_config.encoder_attention_heads __lowerCamelCase = fs_config.conv_pos_groups __lowerCamelCase = fs_config.conv_pos __lowerCamelCase = len(__lowerCAmelCase ) __lowerCamelCase = fs_config.encoder_layers __lowerCamelCase = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: __lowerCamelCase = model.cfg __lowerCamelCase = fs_config.final_dropout __lowerCamelCase = fs_config.layerdrop __lowerCamelCase = fs_config.activation_dropout __lowerCamelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 __lowerCamelCase = fs_config.attention_dropout __lowerCamelCase = fs_config.dropout_input __lowerCamelCase = fs_config.dropout __lowerCamelCase = fs_config.mask_channel_length __lowerCamelCase = fs_config.mask_channel_prob __lowerCamelCase = fs_config.mask_length __lowerCamelCase = fs_config.mask_prob __lowerCamelCase = '''Wav2Vec2FeatureExtractor''' __lowerCamelCase = '''Wav2Vec2CTCTokenizer''' return config @torch.no_grad() def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=True ) -> int: if is_finetuned: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: __lowerCamelCase = SEWConfig.from_pretrained(__lowerCAmelCase ) else: __lowerCamelCase = convert_config(model[0] , __lowerCAmelCase ) __lowerCamelCase = model[0].eval() __lowerCamelCase = True if config.feat_extract_norm == '''layer''' else False __lowerCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ) if is_finetuned: if dict_path: __lowerCamelCase = Dictionary.load(__lowerCAmelCase ) # important change bos & pad token id 
since CTC symbol is <pad> and # not <s> as in fairseq __lowerCamelCase = target_dict.pad_index __lowerCamelCase = target_dict.bos_index __lowerCamelCase = target_dict.pad_index __lowerCamelCase = target_dict.bos_index __lowerCamelCase = target_dict.eos_index __lowerCamelCase = len(target_dict.symbols ) __lowerCamelCase = os.path.join(__lowerCAmelCase , '''vocab.json''' ) if not os.path.isdir(__lowerCAmelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__lowerCAmelCase ) ) return os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __lowerCAmelCase ) __lowerCamelCase = WavaVecaCTCTokenizer( __lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__lowerCAmelCase , ) __lowerCamelCase = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) __lowerCamelCase = SEWForCTC(__lowerCAmelCase ) else: __lowerCamelCase = SEWModel(__lowerCAmelCase ) feature_extractor.save_pretrained(__lowerCAmelCase ) recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) hf_model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
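# --- Added usage note (not part of the original script) -----------------------
# Example invocation of `convert_sew_checkpoint` through the CLI defined above;
# the script file name and all paths are placeholders.
#
#   python convert_sew_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/sew_checkpoint.pt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --dict_path /path/to/dict.ltr.txt \
#       --is_finetuned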
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of ``n`` by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of the distinct prime factors of ``num``."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of ``n`` consecutive integers that each have
    exactly ``n`` distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Project Euler 47: return the first member of the first run of ``n``
    consecutive integers with ``n`` distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
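# --- Added worked example (not part of the original solution) -----------------
# For the default n = 4 the search lands on 134043, the start of the first run
# of four consecutive integers with four distinct prime factors each:
#
#   >>> sorted(unique_prime_factors(134043))
#   [3, 7, 13, 491]
#   >>> [upf_len(134043 + i) for i in range(4)]
#   [4, 4, 4, 4]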
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
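# --- Added note (not part of the original file) --------------------------------
# The `_LazyModule` indirection above defers the heavy framework imports until
# an attribute is first accessed. A stripped-down sketch of the idea (the class
# and names below are illustrative, not the transformers implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):  # only called for attributes not yet bound
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f".{submodule}", self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)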
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , 
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Tuple = { "configuration_roberta_prelayernorm": [ "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaPreLayerNormConfig", "RobertaPreLayerNormOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaPreLayerNormForCausalLM", "RobertaPreLayerNormForMaskedLM", "RobertaPreLayerNormForMultipleChoice", "RobertaPreLayerNormForQuestionAnswering", "RobertaPreLayerNormForSequenceClassification", "RobertaPreLayerNormForTokenClassification", "RobertaPreLayerNormModel", "RobertaPreLayerNormPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : int = [ "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaPreLayerNormForCausalLM", "TFRobertaPreLayerNormForMaskedLM", "TFRobertaPreLayerNormForMultipleChoice", "TFRobertaPreLayerNormForQuestionAnswering", "TFRobertaPreLayerNormForSequenceClassification", "TFRobertaPreLayerNormForTokenClassification", "TFRobertaPreLayerNormMainLayer", "TFRobertaPreLayerNormModel", "TFRobertaPreLayerNormPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Dict = [ "FlaxRobertaPreLayerNormForCausalLM", "FlaxRobertaPreLayerNormForMaskedLM", "FlaxRobertaPreLayerNormForMultipleChoice", "FlaxRobertaPreLayerNormForQuestionAnswering", "FlaxRobertaPreLayerNormForSequenceClassification", "FlaxRobertaPreLayerNormForTokenClassification", "FlaxRobertaPreLayerNormModel", "FlaxRobertaPreLayerNormPreTrainedModel", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, 
FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
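# --- Added usage sketch (not part of the original file) -----------------------
# Constructing the config with RoPE scaling enabled; an out-of-range factor
# trips `_rope_scaling_validation` above. (In user code the class would be
# imported from the installed package, e.g. `transformers.OpenLlamaConfig`.)
#
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # ValueError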
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) def __magic_name__ ( __lowerCAmelCase : Any ) -> Tuple: __lowerCamelCase = '''huggingface/label-files''' __lowerCamelCase = '''imagenet-1k-id2label.json''' __lowerCamelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) ) __lowerCamelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = '''std_conv''' if '''bit''' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" __lowerCamelCase = BitConfig( conv_layer=__lowerCAmelCase , num_labels=1000 , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase , ) return config def __magic_name__ ( __lowerCAmelCase : Optional[Any] ) -> Optional[int]: if "stem.conv" in name: __lowerCamelCase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' ) if "blocks" in name: __lowerCamelCase = name.replace('''blocks''' , '''layers''' ) if "head.fc" in name: __lowerCamelCase = name.replace('''head.fc''' , '''classifier.1''' ) if name.startswith('''norm''' ): __lowerCamelCase = '''bit.''' + name if "bit" not in name and "classifier" not in name: __lowerCamelCase = '''bit.encoder.''' + name return name def __magic_name__ ( ) -> List[Any]: __lowerCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCamelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ) return im @torch.no_grad() def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=False ) -> Dict: __lowerCamelCase = get_config(__lowerCAmelCase ) # load original model from timm __lowerCamelCase = create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ) timm_model.eval() # load state_dict of original model __lowerCamelCase = timm_model.state_dict() for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(__lowerCAmelCase ) __lowerCamelCase = val.squeeze() if '''head''' in key else val # load HuggingFace model __lowerCamelCase = BitForImageClassification(__lowerCAmelCase ) model.eval() model.load_state_dict(__lowerCAmelCase ) # create image processor __lowerCamelCase = create_transform(**resolve_data_config({} , model=__lowerCAmelCase ) ) __lowerCamelCase = transform.transforms __lowerCamelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } __lowerCamelCase = BitImageProcessor( do_resize=__lowerCAmelCase , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowerCAmelCase , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=__lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , 
image_std=timm_transforms[-1].std.tolist() , ) __lowerCamelCase = prepare_img() __lowerCamelCase = transform(__lowerCAmelCase ).unsqueeze(0 ) __lowerCamelCase = processor(__lowerCAmelCase , return_tensors='''pt''' ).pixel_values # verify pixel values assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase ) # verify logits with torch.no_grad(): __lowerCamelCase = model(__lowerCAmelCase ) __lowerCamelCase = outputs.logits print('''Logits:''' , logits[0, :3] ) print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] ) __lowerCamelCase = timm_model(__lowerCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
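# --- Added usage note (not part of the original script) -----------------------
# Example invocation of `convert_bit_checkpoint` through the CLI defined above;
# the model name matches the script's default, the script file name and output
# path are placeholders.
#
#   python convert_bit_to_pytorch.py \
#       --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 \
#       --push_to_hub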
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing: step to the next bucket, wrapping around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Returns True if the bucket was empty or held the same key.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                # Tombstone the slot so later probes keep walking past it.
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
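# --- Added usage sketch (not part of the original file) -----------------------
# Exercises insert, lookup, deletion with tombstones, and iteration.
if __name__ == "__main__":
    hm: HashMap[str, int] = HashMap(initial_block_size=8)
    for i, word in enumerate(["open", "addressing", "hash", "map"]):
        hm[word] = i
    del hm["hash"]
    print(len(hm), hm["map"], sorted(hm))  # 3 3 ['addressing', 'map', 'open']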
from random import shuffle import tensorflow as tf from numpy import array def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str ) -> Optional[Any]: __lowerCamelCase = int(__lowerCAmelCase ) assert noofclusters < len(__lowerCAmelCase ) # Find out the dimensionality __lowerCamelCase = len(vectors[0] ) # Will help select random centroids from among the available vectors __lowerCamelCase = list(range(len(__lowerCAmelCase ) ) ) shuffle(__lowerCAmelCase ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. __lowerCamelCase = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION __lowerCamelCase = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points __lowerCamelCase = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(__lowerCAmelCase ) ] ##These nodes will assign the centroid Variables the appropriate ##values __lowerCamelCase = tf.placeholder('''float64''' , [dim] ) __lowerCamelCase = [] for centroid in centroids: cent_assigns.append(tf.assign(__lowerCAmelCase , __lowerCAmelCase ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) __lowerCamelCase = [tf.Variable(0 ) for i in range(len(__lowerCAmelCase ) )] ##These nodes will assign an assignment Variable the appropriate ##value __lowerCamelCase = tf.placeholder('''int32''' ) __lowerCamelCase = [] for assignment in assignments: cluster_assigns.append(tf.assign(__lowerCAmelCase , __lowerCAmelCase ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input __lowerCamelCase = tf.placeholder('''float''' , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors __lowerCamelCase = tf.reduce_mean(__lowerCAmelCase , 0 ) ##Node for computing Euclidean distances # Placeholders for input __lowerCamelCase = tf.placeholder('''float''' , [dim] ) __lowerCamelCase = tf.placeholder('''float''' , [dim] ) __lowerCamelCase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__lowerCAmelCase , __lowerCAmelCase ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input __lowerCamelCase = tf.placeholder('''float''' , [noofclusters] ) __lowerCamelCase = tf.argmin(__lowerCAmelCase , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. __lowerCamelCase = tf.initialize_all_variables() # Initialize all variables sess.run(__lowerCAmelCase ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. __lowerCamelCase = 100 for _ in range(__lowerCAmelCase ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(__lowerCAmelCase ) ): __lowerCamelCase = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. __lowerCamelCase = [ sess.run(__lowerCAmelCase , feed_dict={va: vect, va: sess.run(__lowerCAmelCase )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input __lowerCamelCase = sess.run( __lowerCAmelCase , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(__lowerCAmelCase ): # Collect all the vectors assigned to this cluster __lowerCamelCase = [ vectors[i] for i in range(len(__lowerCAmelCase ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location __lowerCamelCase = sess.run( __lowerCAmelCase , feed_dict={mean_input: array(__lowerCAmelCase )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments __lowerCamelCase = sess.run(__lowerCAmelCase ) __lowerCamelCase = sess.run(__lowerCAmelCase ) return centroids, assignments
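# --- Added usage note (not part of the original file) -------------------------
# The Graph/Session/placeholder API above is TensorFlow 1.x; under TF 2.x it
# would need `import tensorflow.compat.v1 as tf` plus `tf.disable_v2_behavior()`.
# Illustrative call (the clustering function's original name is masked above;
# `TFKMeansCluster` is an assumed stand-in for it):
#
#   vectors = [[1.0, 1.0], [1.1, 0.9], [8.0, 8.0], [8.2, 7.9]]
#   centroids, assignments = TFKMeansCluster(vectors, 2)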
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Graph for running the Markov Chain algorithm."""

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        # Sample the next node: walk the cumulative distribution until it
        # exceeds a uniform random draw.
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run the Markov Chain and count how often each node is visited."""
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
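# --- Added usage sketch (not part of the original file) -----------------------
# A three-node chain whose per-node probabilities each sum to 1; after many
# steps the visit counts approximate the chain's stationary distribution.
#
#   transitions = [
#       ("a", "a", 0.9), ("a", "b", 0.075), ("a", "c", 0.025),
#       ("b", "a", 0.15), ("b", "b", 0.8), ("b", "c", 0.05),
#       ("c", "a", 0.25), ("c", "b", 0.25), ("c", "c", 0.5),
#   ]
#   print(get_transitions("a", transitions, 5000))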
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
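# --- Added worked examples (not part of the original file) --------------------
# Both inputs are zero-padded to the same width before the per-bit AND:
#
#   >>> binary_and(25, 32)
#   '0b000000'
#   >>> binary_and(37, 50)
#   '0b100000'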
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Optional[int] = UnCLIPImageVariationPipeline a__ : Optional[Any] = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""} a__ : Union[str, Any] = IMAGE_VARIATION_BATCH_PARAMS a__ : int = [ """generator""", """return_dict""", """decoder_num_inference_steps""", """super_res_num_inference_steps""", ] a__ : int = False @property def __A ( self : Optional[Any] ) -> Optional[Any]: return 32 @property def __A ( self : Tuple ) -> str: return 32 @property def __A ( self : Optional[int] ) -> Tuple: return self.time_input_dim @property def __A ( self : Dict ) -> Optional[Any]: return self.time_input_dim * 4 @property def __A ( self : Optional[Any] ) -> List[Any]: return 1_00 @property def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __A ( self : int ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ ) @property def __A ( self : Any ) -> str: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ ) @property def __A ( self : Dict ) -> Tuple: torch.manual_seed(0 ) __lowerCamelCase = { '''clip_embeddings_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''cross_attention_dim''': self.cross_attention_dim, } __lowerCamelCase = UnCLIPTextProjModel(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : str ) -> Optional[int]: torch.manual_seed(0 ) __lowerCamelCase = { '''sample_size''': 32, # RGB in channels '''in_channels''': 3, # Out channels is double in channels because predicts mean and variance '''out_channels''': 6, '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': 
'''scale_shift''', '''class_embed_type''': '''identity''', } __lowerCamelCase = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Optional[int] ) -> List[Any]: return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def __A ( self : int ) -> List[str]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def __A ( self : List[str] ) -> List[Any]: # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1 ) __lowerCamelCase = UNetaDModel(**self.dummy_super_res_kwargs ) return model def __A ( self : Union[str, Any] ) -> List[str]: __lowerCamelCase = self.dummy_decoder __lowerCamelCase = self.dummy_text_proj __lowerCamelCase = self.dummy_text_encoder __lowerCamelCase = self.dummy_tokenizer __lowerCamelCase = self.dummy_super_res_first __lowerCamelCase = self.dummy_super_res_last __lowerCamelCase = UnCLIPScheduler( variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , ) __lowerCamelCase = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , ) __lowerCamelCase = CLIPImageProcessor(crop_size=32 , size=32 ) __lowerCamelCase = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ) -> List[Any]: __lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) if pil_image: __lowerCamelCase = input_image * 0.5 + 0.5 __lowerCamelCase = input_image.clamp(0 , 1 ) __lowerCamelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() __lowerCamelCase = DiffusionPipeline.numpy_to_pil(SCREAMING_SNAKE_CASE__ )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def __A ( self : Dict ) -> Optional[Any]: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = output.images __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe( **SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] 
__lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : Optional[int] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = output.images __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe( **SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : List[Any] ) -> str: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ pipeline_inputs['''image'''], pipeline_inputs['''image'''], ] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = output.images __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ tuple_pipeline_inputs['''image'''], tuple_pipeline_inputs['''image'''], ] __lowerCamelCase = pipe( **SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) __lowerCamelCase = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : List[Any] ) -> str: __lowerCamelCase = torch.device('''cpu''' ) class lowerCAmelCase__ : a__ : List[Any] = 1 __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe.decoder.dtype __lowerCamelCase = 1 __lowerCamelCase = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) __lowerCamelCase = pipe.prepare_latents( SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , latents=SCREAMING_SNAKE_CASE__ , scheduler=DummyScheduler() ) __lowerCamelCase = ( batch_size, 
pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) __lowerCamelCase = pipe.prepare_latents( SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ , device=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , latents=SCREAMING_SNAKE_CASE__ , scheduler=DummyScheduler() ) __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe( **SCREAMING_SNAKE_CASE__ , decoder_latents=SCREAMING_SNAKE_CASE__ , super_res_latents=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ , pil_image=SCREAMING_SNAKE_CASE__ ) # Don't pass image, instead pass embedding __lowerCamelCase = pipeline_inputs.pop('''image''' ) __lowerCamelCase = pipe.image_encoder(SCREAMING_SNAKE_CASE__ ).image_embeds __lowerCamelCase = pipe( **SCREAMING_SNAKE_CASE__ , decoder_latents=SCREAMING_SNAKE_CASE__ , super_res_latents=SCREAMING_SNAKE_CASE__ , image_embeddings=SCREAMING_SNAKE_CASE__ , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1e-4 @skip_mps def __A ( self : List[Any] ) -> int: __lowerCamelCase = torch_device == '''cpu''' # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor __lowerCamelCase = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE__ , expected_max_diff=SCREAMING_SNAKE_CASE__ ) @skip_mps def __A ( self : Union[str, Any] ) -> Tuple: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True __lowerCamelCase = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Tuple ) -> Any: __lowerCamelCase = [ '''decoder_num_inference_steps''', '''super_res_num_inference_steps''', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes __lowerCamelCase = [2, 3] self._test_inference_batch_consistent( batch_sizes=SCREAMING_SNAKE_CASE__ , additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=SCREAMING_SNAKE_CASE__ ) @skip_mps def __A ( self : int ) -> int: return super().test_dict_tuple_outputs_equivalent() @skip_mps def __A ( self : List[Any] ) -> Any: return super().test_save_load_local() @skip_mps def __A ( self : Tuple ) -> str: return super().test_save_load_optional_components() @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[Any] ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' ) __lowerCamelCase = UnCLIPImageVariationPipeline.from_pretrained( '''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa ) __lowerCamelCase = pipeline.to(SCREAMING_SNAKE_CASE__ ) pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 
torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = pipeline( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 15 )
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = 
CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence` in place between `start` and `end` (inclusive) using slowsort."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return

    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        # swap so the maximum of the two halves ends up in the last position
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
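# Minimal usage sketch for slowsort above; the sort is in place and returns
# None, so the result is read back from the input list (sample data is
# illustrative).
data = [5, 1, 4, 2, 3]
slowsort(data)
assert data == [1, 2, 3, 4, 5]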
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , 
float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
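# Illustrative check of the get_pairs helper defined above: it collects the set
# of adjacent symbol pairs that the BPE loop then ranks and merges. The tuple
# below is a hypothetical word already split into symbols, with the usual
# end-of-word marker appended.
assert get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}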
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration SCREAMING_SNAKE_CASE__ : List[str] = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def __magic_name__ ( __lowerCAmelCase : str ) -> Any: __lowerCamelCase = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def __magic_name__ ( __lowerCAmelCase : str ) -> Dict: __lowerCamelCase = list(s_dict.keys() ) for key in keys: __lowerCamelCase = key for k, v in WHISPER_MAPPING.items(): if k in key: __lowerCamelCase = new_key.replace(__lowerCAmelCase , __lowerCAmelCase ) print(f'''{key} -> {new_key}''' ) __lowerCamelCase = s_dict.pop(__lowerCAmelCase ) return s_dict def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = emb.weight.shape __lowerCamelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) __lowerCamelCase = emb.weight.data return lin_layer def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> bytes: os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) __lowerCamelCase = os.path.basename(__lowerCAmelCase ) __lowerCamelCase = url.split('''/''' )[-2] __lowerCamelCase = os.path.join(__lowerCAmelCase , 
__lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ) and not os.path.isfile(__lowerCAmelCase ): raise RuntimeError(f'''{download_target} exists and is not a regular file''' ) if os.path.isfile(__lowerCAmelCase ): __lowerCamelCase = open(__lowerCAmelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' ) with urllib.request.urlopen(__lowerCAmelCase ) as source, open(__lowerCAmelCase , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=__lowerCAmelCase , unit_divisor=1024 ) as loop: while True: __lowerCamelCase = source.read(8192 ) if not buffer: break output.write(__lowerCAmelCase ) loop.update(len(__lowerCAmelCase ) ) __lowerCamelCase = open(__lowerCAmelCase , '''rb''' ).read() if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : str ) -> Any: if ".pt" not in checkpoint_path: __lowerCamelCase = _download(_MODELS[checkpoint_path] ) else: __lowerCamelCase = torch.load(__lowerCAmelCase , map_location='''cpu''' ) __lowerCamelCase = original_checkpoint['''dims'''] __lowerCamelCase = original_checkpoint['''model_state_dict'''] __lowerCamelCase = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(__lowerCAmelCase ) rename_keys(__lowerCAmelCase ) __lowerCamelCase = True __lowerCamelCase = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] __lowerCamelCase = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=__lowerCAmelCase , decoder_ffn_dim=__lowerCAmelCase , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , max_source_positions=dimensions['''n_audio_ctx'''] , ) __lowerCamelCase = WhisperForConditionalGeneration(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' f''' but all the following weights are missing {missing}''' ) if tie_embeds: __lowerCamelCase = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __lowerCamelCase = proj_out_weights model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Dict = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
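# Toy sanity check for the rename_keys helper above, run on a stand-in dict
# rather than a real checkpoint: WHISPER_MAPPING rewrites OpenAI-style
# parameter names into Transformers-style names by in-place substring
# replacement (and prints each mapping as it goes).
toy_state_dict = {"encoder.blocks.0.mlp.0.weight": None}
rename_keys(toy_state_dict)  # "blocks" -> "layers", "mlp.0" -> "fc1"
assert "encoder.layers.0.fc1.weight" in toy_state_dict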
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , 
output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
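# Worked check of the flip bookkeeping in update_image_and_anno above:
# YOLO-style annotations store a normalized x-center, so a horizontal flip maps
# x_center -> 1 - x_center while the class id, y-center, width and height are
# unchanged (sample box values are illustrative).
box = [0, 0.25, 0.5, 0.2, 0.3]  # [class, x_center, y_center, width, height]
flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
assert flipped == [0, 0.75, 0.5, 0.2, 0.3]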
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == 
"multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == 
labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , 
__lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
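The three chunks above reconstruct the classic TensorFlow MNIST loader: parse big-endian IDX headers, reshape and rescale pixels to [0, 1], batch with per-epoch shuffling, and split a validation slice off the training set. A minimal standalone sketch of just the IDX header parsing, assuming a local gzip file in IDX format (the helper name and path argument are illustrative, not from the sample):

import gzip
import struct


def read_idx_header(path):
    # IDX files begin with a big-endian uint32 magic number: 2051 for images
    # (followed by count, rows, cols) or 2049 for labels (followed by count).
    with gzip.open(path, "rb") as f:
        magic = struct.unpack(">I", f.read(4))[0]
        if magic == 2051:
            dims = struct.unpack(">III", f.read(12))
        elif magic == 2049:
            dims = struct.unpack(">I", f.read(4))
        else:
            raise ValueError(f"Unexpected magic number {magic} in {path}")
    return magic, dims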
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray


def __magic_name__ ( __lowerCAmelCase : NDArray[floataa] , __lowerCAmelCase : NDArray[floataa] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , ) -> list[float]:
    __lowerCamelCase , __lowerCamelCase = coefficient_matrix.shape
    __lowerCamelCase , __lowerCamelCase = constant_matrix.shape

    if rowsa != colsa:
        __lowerCamelCase = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(__lowerCAmelCase )

    if colsa != 1:
        __lowerCamelCase = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
        raise ValueError(__lowerCAmelCase )

    if rowsa != rowsa:
        __lowerCamelCase = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
        )
        raise ValueError(__lowerCAmelCase )

    if len(__lowerCAmelCase ) != rowsa:
        __lowerCamelCase = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f'''matrix but received {len(__lowerCAmelCase )} and {rowsa}'''
        )
        raise ValueError(__lowerCAmelCase )

    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''' )

    __lowerCamelCase = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    __lowerCamelCase , __lowerCamelCase = table.shape

    strictly_diagonally_dominant(__lowerCAmelCase )

    # Iterates the whole matrix for given number of times
    for _ in range(__lowerCAmelCase ):
        __lowerCamelCase = []
        for row in range(__lowerCAmelCase ):
            __lowerCamelCase = 0
            for col in range(__lowerCAmelCase ):
                if col == row:
                    __lowerCamelCase = table[row][col]
                elif col == cols - 1:
                    __lowerCamelCase = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            __lowerCamelCase = (temp + val) / denom
            new_val.append(__lowerCAmelCase )
        __lowerCamelCase = new_val

    return [float(__lowerCAmelCase ) for i in new_val]


def __magic_name__ ( __lowerCAmelCase : NDArray[floataa] ) -> bool:
    __lowerCamelCase , __lowerCamelCase = table.shape

    __lowerCamelCase = True
    for i in range(0 , __lowerCAmelCase ):
        __lowerCamelCase = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
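The routine above implements the Jacobi update x_i <- (b_i - sum_{j != i} a_ij * x_j) / a_ii after verifying strict diagonal dominance. A vectorized standalone sketch on a small illustrative system (the matrix, right-hand side, and iteration count are made up for the demo):

import numpy as np

A = np.array([[4.0, 1.0, 1.0],
              [1.0, 5.0, 2.0],
              [0.0, 1.0, 3.0]])   # strictly diagonally dominant
b = np.array([6.0, 8.0, 4.0])
x = np.zeros(3)

off_diag = A - np.diag(np.diag(A))
for _ in range(50):
    # x_new[i] = (b[i] - sum_{j != i} A[i, j] * x[j]) / A[i, i]
    x = (b - off_diag @ x) / np.diag(A)

print(np.allclose(x, np.linalg.solve(A, b), atol=1e-6))  # True after enough sweeps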
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
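The fast tokenizer above follows the standard BERT special-token layout: [CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] with 0/1 token type ids for pairs. A hedged usage sketch (downloading the checkpoint named in the pretrained map requires network access):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("lower newer", "a second segment")
print(enc["input_ids"])       # [CLS] A [SEP] B [SEP]
print(enc["token_type_ids"])  # 0s over segment A and its [SEP], 1s over segment B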
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def __magic_name__ ( ) -> None:
    print('''Making key files...''' )
    make_key_files('''rsa''' , 1024 )
    print('''Key files generation successful.''' )


def __magic_name__ ( __lowerCAmelCase : int ) -> tuple[tuple[int, int], tuple[int, int]]:
    print('''Generating prime p...''' )
    __lowerCamelCase = rabinMiller.generate_large_prime(__lowerCAmelCase )
    print('''Generating prime q...''' )
    __lowerCamelCase = rabinMiller.generate_large_prime(__lowerCAmelCase )
    __lowerCamelCase = p * q

    print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''' )
    while True:
        __lowerCamelCase = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(__lowerCAmelCase , (p - 1) * (q - 1) ) == 1:
            break

    print('''Calculating d that is mod inverse of e...''' )
    __lowerCamelCase = cryptoMath.find_mod_inverse(__lowerCAmelCase , (p - 1) * (q - 1) )

    __lowerCamelCase = (n, e)
    __lowerCamelCase = (n, d)
    return (public_key, private_key)


def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ) -> None:
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('''\nWARNING:''' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()

    __lowerCamelCase , __lowerCamelCase = generate_key(__lowerCAmelCase )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , '''w''' ) as out_file:
        out_file.write(f'''{key_size},{public_key[0]},{public_key[1]}''' )

    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , '''w''' ) as out_file:
        out_file.write(f'''{key_size},{private_key[0]},{private_key[1]}''' )


if __name__ == "__main__":
    main()
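The generator above picks primes p and q, a random e coprime to (p - 1)(q - 1), and d = e^-1 mod (p - 1)(q - 1). A toy round-trip with tiny fixed primes, for illustration only; real keys must use large random primes as in the generator:

p, q = 61, 53
n = p * q                  # 3233
phi = (p - 1) * (q - 1)    # 3120
e = 17                     # coprime to phi
d = pow(e, -1, phi)        # modular inverse (Python 3.8+)

msg = 42
cipher = pow(msg, e, n)
assert pow(cipher, d, n) == msg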
from __future__ import annotations


def __magic_name__ ( __lowerCAmelCase : list[int] ) -> bool:
    return len(set(__lowerCAmelCase ) ) == len(__lowerCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
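The set-size comparison above in action, on illustrative values:

data_ok = [1, 2, 3]
data_dup = [1, 2, 2]
assert len(set(data_ok)) == len(data_ok)    # no duplicates
assert len(set(data_dup)) != len(data_dup)  # a duplicate collapses the set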
def __magic_name__ ( ) -> str:
    __lowerCamelCase = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(__lowerCAmelCase )[-10:]


if __name__ == "__main__":
    print(solution())
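The loop above sums i**i for i = 1..1000 and keeps the last ten digits of the total. An equivalent sketch that avoids building the huge integers by working modulo 10**10 throughout (three-argument pow does modular exponentiation):

MOD = 10**10
total = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(total).zfill(10))  # same last ten digits as the full sum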
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


SCREAMING_SNAKE_CASE__ : Dict = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ : Tuple = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
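The module above uses the _LazyModule pattern: heavy submodules are only imported when an attribute is first accessed. A simplified standalone sketch of the same deferred-import idea using PEP 562 module-level __getattr__ (transformers' _LazyModule additionally handles __spec__, dir(), and nested structures):

import importlib

# maps a module name to the symbols this module lazily re-exports from it
_import_structure = {"math": ["sqrt"], "json": ["dumps"]}


def __getattr__(name):
    # Called only for attributes not already defined on this module.
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)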
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)


class lowerCAmelCase__ ( __lowercase ):
    def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> None:
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , )
        super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
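The class above is a deprecation shim: warn once, then delegate everything to the replacement class. The same pattern in isolation, with illustrative names:

import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # Emit a FutureWarning, then behave exactly like the new class.
        warnings.warn("OldExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)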
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    return abs(__lowerCAmelCase ) if a == 0 else greatest_common_divisor(b % a , __lowerCAmelCase )


def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        __lowerCamelCase , __lowerCamelCase = y, x % y
    return abs(__lowerCAmelCase )


def __magic_name__ ( ) -> Tuple:
    try:
        __lowerCamelCase = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        __lowerCamelCase = int(nums[0] )
        __lowerCamelCase = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_a}) = '''
            f'''{greatest_common_divisor(__lowerCAmelCase , __lowerCAmelCase )}''' )
        print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__lowerCAmelCase , __lowerCAmelCase )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )


if __name__ == "__main__":
    main()
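The sample above pairs recursive Euclid with an iterative variant. A quick standalone cross-check of the iterative form against the standard library:

import math


def gcd_iterative(x, y):
    while y:
        x, y = y, x % y
    return abs(x)


for a, b in [(54, 24), (0, 7), (-12, 18)]:
    assert gcd_iterative(a, b) == math.gcd(abs(a), abs(b))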
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : int = CodeGenTokenizer a__ : List[str] = CodeGenTokenizerFast a__ : Optional[int] = True a__ : List[Any] = {"""add_prefix_space""": True} a__ : str = False def __A ( self : Union[str, Any] ) -> Optional[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCamelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) def __A ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : str ) -> Any: kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = '''lower newer''' __lowerCamelCase = '''lower newer''' return input_text, output_text def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokens + [tokenizer.unk_token] __lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : str ) -> Tuple: if not self.test_rust_tokenizer: return __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' # Testing tokenization __lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids without special tokens __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ 
, add_special_tokens=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing conversion to ids with special tokens __lowerCamelCase = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Testing the unknown token __lowerCamelCase = tokens + [rust_tokenizer.unk_token] __lowerCamelCase = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Tuple ) -> str: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCamelCase = ('''This is a simple input''', '''This is a pair''') __lowerCamelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' , ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input looooooooong''', '''This is a simple input'''] __lowerCamelCase = ('''This is a simple input''', '''This is a pair''') __lowerCamelCase = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] __lowerCamelCase = tokenizer.pad_token_id __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , 
padding='''max_length''' , max_length=30 , return_tensors='''np''' ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = tokenizer(*SCREAMING_SNAKE_CASE__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncate=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def __A ( self : Any ) -> Optional[Any]: __lowerCamelCase = '''$$$''' __lowerCamelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE__ , add_bos_token=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCamelCase = tokenizer.bos_token_id __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) __lowerCamelCase = tokenizer.decode(out_s.input_ids ) __lowerCamelCase = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def __A ( self : Optional[int] ) -> Optional[int]: __lowerCamelCase = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) __lowerCamelCase = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' __lowerCamelCase = '''\nif len_a > len_b: result = a\nelse: result = b''' __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] __lowerCamelCase = tokenizer.decode(SCREAMING_SNAKE_CASE__ , truncate_before_pattern=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Optional[int]: pass
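The final slow test above decodes with truncate_before_pattern, cutting generated code at the first stop pattern. A standalone sketch of that idea (this mirrors the behavior the test relies on, not CodeGen's actual implementation; the patterns and completion text are illustrative):

import re


def truncate_before_pattern(text, patterns):
    # Cut at the earliest match of any stop pattern; keep everything if none match.
    cut = len(text)
    for pat in patterns:
        m = re.search(pat, text, flags=re.MULTILINE)
        if m:
            cut = min(cut, m.start())
    return text[:cut]


completion = "if a > b: result = a\nelse: result = b\n\n\n\n# trailing comment"
print(truncate_before_pattern(completion, [r"^#", r"\n\n\n"]))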
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
    from transformers.models.ta.modeling_flax_ta import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
    @slow
    def __A ( self : Optional[int] ) -> Union[str, Any]:
        __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )

        __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids

        __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id )

        __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits
        __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean()

        __lowerCamelCase = -(labels.shape[-1] * loss.item())
        __lowerCamelCase = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
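The integration test above checks a fixed sequence score: mean token-level softmax cross-entropy, rescaled by target length into a total negative log-likelihood. A standalone NumPy sketch of that arithmetic (it mirrors the computation shape, not optax's implementation; the logits here are random):

import numpy as np


def softmax_xent(logits, labels):
    # logits: (seq_len, vocab), labels: (seq_len,) integer ids
    logp = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
    return -logp[np.arange(len(labels)), labels].mean()


rng = np.random.default_rng(0)
logits = rng.normal(size=(4, 10))
labels = np.array([1, 2, 3, 4])
loss = softmax_xent(logits, labels)
score = -(labels.shape[-1] * loss)  # mirrors the sequence-level score in the test
print(score)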
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : 
Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval 
options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? 
Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. 
{}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
339
1
from math import sqrt


def __magic_name__ ( __lowerCAmelCase : int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def __magic_name__ ( __lowerCAmelCase : int = 1_0001 ) -> int:
    __lowerCamelCase = 0
    __lowerCamelCase = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(__lowerCAmelCase ):
            count += 1
    while count != nth:
        number += 2
        if is_prime(__lowerCAmelCase ):
            count += 1
    return number


if __name__ == "__main__":
    print(F'{solution() = }')
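The solver above counts primes with 6k +/- 1 trial division: after handling 2 and 3, every prime has the form 6k - 1 or 6k + 1. The same check exercised standalone (names here are descriptive stand-ins for the obfuscated ones):

from math import sqrt


def is_prime(n):
    if 1 < n < 4:
        return True
    if n < 2 or n % 2 == 0 or n % 3 == 0:
        return False
    for i in range(5, int(sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True


assert [n for n in range(2, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]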
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == 
"multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ) -> List[Any]: __lowerCamelCase = s.rsplit(__lowerCAmelCase , __lowerCAmelCase ) return new.join(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Any: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] ) -> Tuple: __lowerCamelCase = {} __lowerCamelCase = ['''group_1''', '''group_2''', '''group_3''', '''group_4'''] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __lowerCamelCase = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' ) if "res_path" in key: __lowerCamelCase = key.replace('''res_path.''' , '''res_path.path.''' ) if key.endswith('''.w''' ): __lowerCamelCase = rreplace(__lowerCAmelCase , '''.w''' , '''.weight''' , 1 ) if key.endswith('''.b''' ): __lowerCamelCase = rreplace(__lowerCAmelCase , '''.b''' , '''.bias''' , 1 ) __lowerCamelCase = value.float() return upgrade @torch.no_grad() def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Dict=True ) -> Tuple: from dall_e import Encoder __lowerCamelCase = Encoder() if os.path.exists(__lowerCAmelCase ): __lowerCamelCase = torch.load(__lowerCAmelCase ) else: __lowerCamelCase = torch.hub.load_state_dict_from_url(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = ckpt.state_dict() encoder.load_state_dict(__lowerCAmelCase ) if config_path is not None: __lowerCamelCase = FlavaImageCodebookConfig.from_pretrained(__lowerCAmelCase ) else: __lowerCamelCase = FlavaImageCodebookConfig() __lowerCamelCase = FlavaImageCodebook(__lowerCAmelCase ).eval() __lowerCamelCase = encoder.state_dict() __lowerCamelCase = upgrade_state_dict(__lowerCAmelCase ) hf_model.load_state_dict(__lowerCAmelCase ) __lowerCamelCase = hf_model.state_dict() __lowerCamelCase = count_parameters(__lowerCAmelCase ) __lowerCamelCase = count_parameters(__lowerCAmelCase ) assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(__lowerCAmelCase ) else: return hf_state_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
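The conversion script above renames checkpoint keys with a right-to-left replace built from rsplit/join. That trick in isolation:

def rreplace(s, old, new, occurrence):
    # Replace the last `occurrence` instances of `old`, working from the right.
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


assert rreplace("encoder.block.w", ".w", ".weight", 1) == "encoder.block.weight"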
from collections import namedtuple

import requests
from lxml import html  # type: ignore


SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered")


def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()'''
    return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) )


SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
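The scraper above splats the three xpath results into a namedtuple. The same pattern without the live request (values are placeholders, not real statistics):

from collections import namedtuple

covid_data = namedtuple("covid_data", "cases deaths recovered")
sample = covid_data(*["1,000", "10", "900"])  # stands in for the xpath results
print(sample.cases, sample.recovered)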
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


SCREAMING_SNAKE_CASE__ : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
SCREAMING_SNAKE_CASE__ : Dict = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("utf-8").split()

SCREAMING_SNAKE_CASE__ : Optional[int] = "|".join(sys.argv[1:])
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(rF'^({joined_dirs}).*?\.py$')

SCREAMING_SNAKE_CASE__ : Optional[int] = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
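The directory-prefix filter that script builds, exercised on illustrative file names:

import re

joined_dirs = "|".join(["utils", "src", "tests"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

files = ["src/models/bert.py", "docs/readme.md", "tests/test_bert.py"]
print([f for f in files if regex.match(f)])  # ['src/models/bert.py', 'tests/test_bert.py']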
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. 
-1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
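The training script above hinges on HfArgumentParser's two entry paths: a single `.json` argument is parsed as the whole config, otherwise ordinary CLI flags are used. A minimal self-contained sketch of that pattern, with a hypothetical DemoArguments dataclass that is not part of the original script:

import sys
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    # hypothetical fields, for illustration only
    data_dir: str = field(default="data", metadata={"help": "Path to the dataset directory."})
    eval_beams: int = field(default=4, metadata={"help": "# num_beams to use for evaluation."})


parser = HfArgumentParser(DemoArguments)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    # a single JSON path on the command line is parsed as the full config
    (demo_args,) = parser.parse_json_file(json_file=sys.argv[1])
else:
    (demo_args,) = parser.parse_args_into_dataclasses()
print(demo_args)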
339
1
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> str: assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Dict ) -> Any: __lowerCamelCase = tmp_path / '''cache''' __lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCamelCase = SqlDatasetReader( '''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read() _check_sql_dataset(__lowerCAmelCase , __lowerCAmelCase ) @require_sqlalchemy @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = tmp_path / '''cache''' __lowerCamelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCamelCase = features.copy() if features else default_expected_features __lowerCamelCase = ( Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) __lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read() _check_sql_dataset(__lowerCAmelCase , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: with contextlib.closing(sqlitea.connect(__lowerCAmelCase ) ) as con: __lowerCamelCase = con.cursor() cur.execute('''SELECT * FROM dataset''' ) for row in cur: yield row @require_sqlalchemy def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ) -> List[Any]: __lowerCamelCase = tmp_path / '''cache''' __lowerCamelCase = os.path.join(__lowerCAmelCase , '''tmp.sql''' ) __lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCAmelCase ).read() SqlDatasetWriter(__lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write() __lowerCamelCase = iter_sql_file(__lowerCAmelCase ) __lowerCamelCase = iter_sql_file(__lowerCAmelCase ) for rowa, rowa in zip(__lowerCAmelCase , __lowerCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ) -> Dict: __lowerCamelCase = 
tmp_path / '''cache''' __lowerCamelCase = os.path.join(__lowerCAmelCase , '''tmp.sql''' ) __lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCAmelCase ).read() SqlDatasetWriter(__lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write() __lowerCamelCase = iter_sql_file(__lowerCAmelCase ) __lowerCamelCase = iter_sql_file(__lowerCAmelCase ) for rowa, rowa in zip(__lowerCAmelCase , __lowerCAmelCase ): assert rowa == rowa @require_sqlalchemy def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : str ) -> Union[str, Any]: __lowerCamelCase = tmp_path / '''cache''' __lowerCamelCase = os.path.join(__lowerCAmelCase , '''tmp.sql''' ) __lowerCamelCase = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=__lowerCAmelCase ).read() with pytest.raises(__lowerCAmelCase ): SqlDatasetWriter(__lowerCAmelCase , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
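These tests lean on pytest fixtures (`sqlite_path`, `tmp_path`) defined elsewhere in the suite, and the `sqlitea` import above is the digit-mangled `sqlite3`. A self-contained sketch of the same write/read round trip through the `datasets` SQL helpers, using a hypothetical local database path and assuming sqlalchemy is installed:

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["0", "1"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
ds.to_sql("dataset", "sqlite:///round_trip.db")  # writes the table via SqlDatasetWriter
reloaded = Dataset.from_sql("dataset", "sqlite:///round_trip.db")  # reads it back
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 2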
339
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
339
1
from __future__ import annotations


def print_distance(distance: list[float], src: int) -> None:
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x) for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
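A quick sanity check of bellman_ford under the edge-dict convention above (a sketch, not part of the original module):

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
# the 0 -> 2 -> 1 path (cost 3) beats the direct 0 -> 1 edge (cost 4)
assert bellman_ford(edges, 3, 3, 0) == [0.0, 3.0, 1.0]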
339
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
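For reference, a few checks against the functions above (Project Euler 47 itself asks for solution(4); the first pair of consecutive integers with two distinct prime factors each is 14, 15):

assert unique_prime_factors(100) == {2, 5}
assert upf_len(14) == upf_len(15) == 2  # 14 = 2 * 7, 15 = 3 * 5
assert solution(2) == 14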
339
1
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one encodes a subtractive pair (e.g. "IV")
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
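A round-trip check of the two converters above (the function names are restored here, since the dump mangles them):

assert int_to_roman(1994) == "MCMXCIV"
assert roman_to_int("MCMXCIV") == 1994
# subtractive pairs are handled by the two-character lookahead in roman_to_int
assert roman_to_int("IV") == 4 and roman_to_int("VI") == 6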
339
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , 
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from math import factorial

DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number to string to iterate on its digits and adds their factorials.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
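The well-known length-3 loop from the Project Euler 74 statement, checked against digit_factorial_sum above:

assert digit_factorial_sum(169) == 363601  # 1! + 6! + 9!
assert digit_factorial_sum(363601) == 1454
assert digit_factorial_sum(1454) == 169    # the chain cycles with period 3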
339
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
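A standalone restatement of the rope_scaling rules the validator above enforces (the config class name is mangled in this dump, so this sketch deliberately avoids importing it; validate_rope_scaling is a hypothetical helper):

def validate_rope_scaling(rope_scaling: dict | None) -> None:
    # mirrors _rope_scaling_validation: a 2-field dict with a known type and a factor > 1
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, got {rope_scaling}")
    if rope_scaling.get("type") not in ("linear", "dynamic"):
        raise ValueError("`rope_scaling`'s type field must be one of ['linear', 'dynamic']")
    factor = rope_scaling.get("factor")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError("`rope_scaling`'s factor field must be a float > 1")


validate_rope_scaling({"type": "dynamic", "factor": 2.0})  # passes silently
# validate_rope_scaling({"type": "linear", "factor": 1.0})  # would raise ValueError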
339
1
SCREAMING_SNAKE_CASE__ : str = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
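Tables like this map a bare package name to a pinned requirement string. A sketch (not the library's own API) of checking an installed environment against such a table with the `packaging` module:

import importlib.metadata

from packaging.requirements import Requirement

deps = {"numpy": "numpy", "torch": "torch>=1.4"}  # excerpt of the table above


def pin_is_satisfied(spec: str) -> bool:
    req = Requirement(spec)
    try:
        installed = importlib.metadata.version(req.name)
    except importlib.metadata.PackageNotFoundError:
        return False
    # an empty specifier (e.g. plain "numpy") accepts any installed version
    return req.specifier.contains(installed, prereleases=True)


for name, spec in deps.items():
    print(name, "ok" if pin_is_satisfied(spec) else "missing/incompatible")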
339
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
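Exercising the open-addressing map above (assuming its original name was HashMap, as restored here): inserts past the load factor trigger _size_up, and deletions leave _deleted tombstones so probe chains stay intact.

hm = HashMap(initial_block_size=8)
for i in range(20):   # pushes past 8 * 0.75, forcing at least one resize
    hm[str(i)] = i
del hm["3"]           # leaves a tombstone, not a hole
assert "3" not in hm
assert hm["7"] == 7
assert len(hm) == 19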
339
1
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers SCREAMING_SNAKE_CASE__ : str = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None)
339
from datetime import datetime as dt
import os

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
339
1
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
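In two's complement the sign lives in the top bit, so the XOR of two integers is negative exactly when their sign bits differ, which is all the function checks:

assert different_signs(5, -3) is True
assert different_signs(-5, -3) is False
assert different_signs(5, 3) is False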
339
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
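A check that the string-building version agrees with Python's native & operator (a sketch):

assert binary_and(5, 3) == "0b001"
assert int(binary_and(37, 50), 2) == 37 & 50 == 32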
339
1
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
339
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = 
CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
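The Project Euler 191 statement gives 43 prize strings over a 4-day period, which makes a handy check against the memoized recursion above:

assert solution(1) == 3   # on time, late, or absent are all prize strings for one day
assert solution(4) == 43  # matches the worked example in the problem statement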
339
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
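Slowsort sorts in place; it is the classic "multiply and surrender" joke algorithm, recursing on both halves, moving the maximum to the end, then re-sorting everything but the last element. A quick use:

seq = [9, 8, 7, 6, 5]
slowsort(seq)
assert seq == [5, 6, 7, 8, 9]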
339
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Optional[int] ) -> Tuple: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> str: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple ) -> Tuple: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Dict: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Optional[Any] ) -> List[Any]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : int ) -> Dict: __lowerCamelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> List[Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Any ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) 
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
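# Usage sketch mirroring the joint text+image test above. The checkpoint name
# "openai/clip-vit-base-patch32" is an assumption for illustration; the key
# names and their order are exactly what the test asserts.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
enc = clip_processor(text="lower newer", images=img, return_tensors="pt")
assert list(enc.keys()) == ["input_ids", "attention_mask", "pixel_values"]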
339
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
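# Tiny check of the pair-collection helper feeding the BPE loop above:
assert get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}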
339
1
SCREAMING_SNAKE_CASE__ : List[Any] = "0.18.2" from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
339
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , 
output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
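# The generator-seeding idiom from get_dummy_inputs above, shown standalone;
# presumably it exists because torch builds of this era lacked a
# device-specific Generator for the MPS backend, so a global seed is used there.
import torch

device = "cpu"  # assumption for the example
if str(device).startswith("mps"):
    generator = torch.manual_seed(0)
else:
    generator = torch.Generator(device=device).manual_seed(0)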
339
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration SCREAMING_SNAKE_CASE__ : List[Any] = pytest.mark.integration SCREAMING_SNAKE_CASE__ : Dict = {"comet"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = importlib.util.find_spec("fairseq") is not None SCREAMING_SNAKE_CASE__ : Optional[Any] = {"code_eval"} SCREAMING_SNAKE_CASE__ : str = os.name == "nt" SCREAMING_SNAKE_CASE__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"} SCREAMING_SNAKE_CASE__ : Tuple = importlib.util.find_spec("transformers") is not None def __magic_name__ ( __lowerCAmelCase : Union[str, Any] ) -> Optional[int]: @wraps(__lowerCAmelCase ) def wrapper(self : Tuple , __lowerCAmelCase : int ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , __lowerCAmelCase ) return wrapper def __magic_name__ ( __lowerCAmelCase : Dict ) -> List[Any]: @wraps(__lowerCAmelCase ) def wrapper(self : Tuple , __lowerCAmelCase : Tuple ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , __lowerCAmelCase ) return wrapper def __magic_name__ ( __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]: @wraps(__lowerCAmelCase ) def wrapper(self : Optional[int] , __lowerCAmelCase : Any ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , __lowerCAmelCase ) return wrapper def __magic_name__ ( ) -> str: __lowerCamelCase = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( __lowercase , __lowercase , __lowercase ) @local class lowerCAmelCase__ ( parameterized.TestCase ): a__ : str = {} a__ : str = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> Tuple: __lowerCamelCase = '''[...]''' __lowerCamelCase = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path ) __lowerCamelCase = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE__ ) # check parameters __lowerCamelCase = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(SCREAMING_SNAKE_CASE__ , metric_module.__name__ ): with self.use_local_metrics(): try: __lowerCamelCase = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: __lowerCamelCase = '''[...]''' __lowerCamelCase = 
importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) ).module_path ) # run doctest with self.use_local_metrics(): __lowerCamelCase = doctest.testmod(SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , raise_on_error=SCREAMING_SNAKE_CASE__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> str: if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE__ ): yield else: yield @contextmanager def __A ( self : Optional[Any] ) -> Optional[Any]: def load_local_metric(SCREAMING_SNAKE_CASE__ : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): return load_metric(os.path.join('''metrics''' , SCREAMING_SNAKE_CASE__ ) , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with patch('''datasets.load_metric''' ) as mock_load_metric: __lowerCamelCase = load_local_metric yield @classmethod def __A ( cls : Dict , SCREAMING_SNAKE_CASE__ : Dict ) -> Tuple: def wrapper(SCREAMING_SNAKE_CASE__ : List[Any] ): __lowerCamelCase = contextmanager(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def __magic_name__ ( __lowerCAmelCase : int ) -> Optional[Any]: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class lowerCAmelCase__ ( __lowercase ): def __A ( self : int , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]: assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: __lowerCamelCase = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def __magic_name__ ( __lowerCAmelCase : List[str] ) -> List[str]: import torch def bert_cos_score_idf(__lowerCAmelCase : Dict , __lowerCAmelCase : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[int] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__lowerCAmelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: __lowerCamelCase = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[Any]: def load_from_checkpoint(__lowerCAmelCase : Optional[int] ): class lowerCAmelCase__ : def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: assert len(SCREAMING_SNAKE_CASE__ ) == 2 __lowerCamelCase = [0.19, 0.92] return scores, sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch('''comet.download_model''' ) as mock_download_model: __lowerCamelCase = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: __lowerCamelCase = load_from_checkpoint yield 
def __magic_name__ ( ) -> Union[str, Any]: __lowerCamelCase = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) __lowerCamelCase = '''ERROR''' __lowerCamelCase = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__lowerCAmelCase , match=re.escape(__lowerCAmelCase ) ): metric.compute(predictions=[] , references=[] , scheme=__lowerCAmelCase )
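# The patcher registry used above, reduced to a standalone sketch: a decorator
# stores a contextmanager factory per metric name so the expensive model call
# can be stubbed out while the doctests run. Names here are illustrative only.
from contextlib import contextmanager

PATCHERS = {}


def register_patcher(name):
    def wrapper(func):
        PATCHERS[name] = contextmanager(func)
        return PATCHERS[name]

    return wrapper


@register_patcher("my_metric")
def patch_my_metric(module_name):
    # a real patcher would swap the heavy predictor for a stub here
    yield


with PATCHERS["my_metric"]("my_metric"):
    pass  # the doctest would run here with the stub in place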
339
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
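# The bounding-box arithmetic above in one line: a horizontal flip mirrors the
# normalised x-centre around 0.5 and leaves class id, y, width and height alone.
bbox = [0, 0.25, 0.40, 0.10, 0.20]  # class, x_center, y_center, width, height
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
assert flipped == [0, 0.75, 0.40, 0.10, 0.20]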
339
1
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
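# Round trip through the tables above: every letter becomes a five-symbol A/B
# code (a Baconian-style cipher), so decode(encode(word)) returns the word.
assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
assert decode(encode("hello")) == "hello"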
339
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == 
labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , 
__lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
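# Illustration of the big-endian 32-bit read used for the magic-number checks
# above (a minimal sketch of the same numpy idiom, here on the value 2051):
import io

import numpy

stream = io.BytesIO((2051).to_bytes(4, "big"))
big_endian_u32 = numpy.dtype(numpy.uint32).newbyteorder(">")
assert numpy.frombuffer(stream.read(4), dtype=big_endian_u32)[0] == 2051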
339
1
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
339
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer if the requested options differ from the
        # ones stored in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
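For reference, the special-token layout produced above follows the BERT convention ([CLS] A [SEP] B [SEP]). A minimal usage sketch, assuming the squeezebert/squeezebert-uncased checkpoint is reachable:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("squeezebert/squeezebert-uncased")
pair_ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
# -> [cls_id, 10, 11, sep_id, 20, 21, sep_id]
type_ids = tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21])
# -> [0, 0, 0, 0, 1, 1, 1]  (segment 0 covers CLS + A + SEP, segment 1 covers B + SEP)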
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") SCREAMING_SNAKE_CASE__ : Any = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : bool = field( default=__lowercase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} ) a__ : bool = field( default=__lowercase , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) a__ : Optional[int] = field( default=__lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of prediction examples to this """ """value if set.""" ) } , ) @dataclass class lowerCAmelCase__ : a__ : str = field( default=__lowercase , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : str = field( default=__lowercase , metadata={"""help""": """Evaluation language. 
Also train language if `train_language` is set to None."""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Train language if it is different from the evaluation language."""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : Optional[bool] = field( default=__lowercase , metadata={"""help""": """arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"""} , ) a__ : bool = field( default=__lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) a__ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a__ : bool = field( default=__lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) a__ : bool = field( default=__lowercase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_xnli''' , __lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __lowerCamelCase = training_args.get_process_log_level() logger.setLevel(__lowerCAmelCase ) datasets.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.set_verbosity(__lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. 
__lowerCamelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCamelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: __lowerCamelCase = load_dataset( '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: __lowerCamelCase = load_dataset( '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = train_dataset.features['''label'''].names if training_args.do_eval: __lowerCamelCase = load_dataset( '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = eval_dataset.features['''label'''].names if training_args.do_predict: __lowerCamelCase = load_dataset( '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = predict_dataset.features['''label'''].names # Labels __lowerCamelCase = len(__lowerCAmelCase ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__lowerCAmelCase , idalabel={str(__lowerCAmelCase ): label for i, label in enumerate(__lowerCAmelCase )} , labelaid={label: i for i, label in enumerate(__lowerCAmelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: __lowerCamelCase = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __lowerCamelCase = False def preprocess_function(__lowerCAmelCase : int ): # Tokenize the texts return tokenizer( examples['''premise'''] , examples['''hypothesis'''] , padding=__lowerCAmelCase , max_length=data_args.max_seq_length , truncation=__lowerCAmelCase , ) if training_args.do_train: if data_args.max_train_samples is not None: __lowerCamelCase = min(len(__lowerCAmelCase ) , data_args.max_train_samples ) __lowerCamelCase = train_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): __lowerCamelCase = train_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , ) # Log a few random samples from the training set: for index in random.sample(range(len(__lowerCAmelCase ) ) , 3 ): logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' ) if training_args.do_eval: if data_args.max_eval_samples is not None: __lowerCamelCase = min(len(__lowerCAmelCase ) , data_args.max_eval_samples ) __lowerCamelCase = eval_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): __lowerCamelCase = eval_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , ) if training_args.do_predict: if data_args.max_predict_samples is not None: __lowerCamelCase = min(len(__lowerCAmelCase ) , data_args.max_predict_samples ) __lowerCamelCase = predict_dataset.select(range(__lowerCAmelCase ) ) with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ): __lowerCamelCase = predict_dataset.map( __lowerCAmelCase , batched=__lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , ) # Get the metric function __lowerCamelCase = evaluate.load('''xnli''' ) # You can define your custom compute_metrics function. 
It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowerCAmelCase : EvalPrediction ): __lowerCamelCase = p.predictions[0] if isinstance(p.predictions , __lowerCAmelCase ) else p.predictions __lowerCamelCase = np.argmax(__lowerCAmelCase , axis=1 ) return metric.compute(predictions=__lowerCAmelCase , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: __lowerCamelCase = default_data_collator elif training_args.fpaa: __lowerCamelCase = DataCollatorWithPadding(__lowerCAmelCase , pad_to_multiple_of=8 ) else: __lowerCamelCase = None # Initialize our Trainer __lowerCamelCase = Trainer( model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , ) # Training if training_args.do_train: __lowerCamelCase = None if training_args.resume_from_checkpoint is not None: __lowerCamelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCamelCase = last_checkpoint __lowerCamelCase = trainer.train(resume_from_checkpoint=__lowerCAmelCase ) __lowerCamelCase = train_result.metrics __lowerCamelCase = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCAmelCase ) ) __lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''' , __lowerCAmelCase ) trainer.save_metrics('''train''' , __lowerCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(eval_dataset=__lowerCAmelCase ) __lowerCamelCase = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCAmelCase ) __lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics('''eval''' , __lowerCAmelCase ) trainer.save_metrics('''eval''' , __lowerCAmelCase ) # Prediction if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = trainer.predict(__lowerCAmelCase , metric_key_prefix='''predict''' ) __lowerCamelCase = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(__lowerCAmelCase ) ) __lowerCamelCase = min(__lowerCAmelCase , len(__lowerCAmelCase ) ) trainer.log_metrics('''predict''' , __lowerCAmelCase ) trainer.save_metrics('''predict''' , __lowerCAmelCase ) __lowerCamelCase = np.argmax(__lowerCAmelCase , axis=1 ) __lowerCamelCase = os.path.join(training_args.output_dir , '''predictions.txt''' ) if trainer.is_world_process_zero(): with open(__lowerCAmelCase , '''w''' ) as writer: writer.write('''index\tprediction\n''' ) for index, item in enumerate(__lowerCAmelCase ): __lowerCamelCase = label_list[item] writer.write(f'''{index}\t{item}\n''' ) if __name__ == "__main__": main()
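The script is driven entirely by HfArgumentParser flags; argument names come from the dataclasses above plus the standard TrainingArguments. A hypothetical invocation, followed by a tiny sketch of the argmax step that feeds the xnli metric:

# python run_xnli.py --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en --do_train --do_eval \
#     --per_device_train_batch_size 32 --output_dir /tmp/debug_xnli
import numpy as np

logits = np.array([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
preds = np.argmax(logits, axis=1)  # -> array([1, 0]); passed to metric.compute(...)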
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """Return True if every element of `nums` appears exactly once."""
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
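Usage sketch for the uniqueness check (the name all_unique follows the cleaned-up version above):

assert all_unique([1, 2, 3]) is True
assert all_unique([1, 2, 2]) is False
assert all_unique([]) is True  # an empty list is vacuously unique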
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the plaintext, split doubled letters with X, and pad to even length."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
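A round-trip sketch using the functions above. Note the 25-letter table contains no J, so plaintext containing J will raise a ValueError in encode; the key below is purely illustrative:

ciphertext = encode("hide the gold in the tree stump", "playfair example")
recovered = decode(ciphertext, "playfair example")
# `recovered` equals the *prepared* plaintext (upper-cased, doubled letters split
# with X, padded to even length): "HIDETHEGOLDINTHETREXESTUMP"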
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
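With this layout, the heavy torch-backed modules are only imported when a symbol is first touched. A minimal sketch of the user-facing behavior, assuming a transformers version that ships Falcon:

from transformers import FalconConfig  # resolved lazily via _LazyModule

config = FalconConfig()  # accessing the config does not import the modeling code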
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" SCREAMING_SNAKE_CASE__ : Tuple = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" SCREAMING_SNAKE_CASE__ : str = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __A ( self : str ) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : List[List[List[str]]] , SCREAMING_SNAKE_CASE__ : List[List[str]] , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : int = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE__ , hypotheses=SCREAMING_SNAKE_CASE__ , min_len=SCREAMING_SNAKE_CASE__ , max_len=SCREAMING_SNAKE_CASE__ ) }
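A toy, by-hand illustration of the GLEU definition quoted in the description (the minimum of n-gram precision and recall), restricted to unigrams and simplified to set intersection (the real metric counts matches with multiplicity):

hyp = ["the", "cat", "sat"]
ref = ["the", "cat", "slept"]
matches = len(set(hyp) & set(ref))  # 2 ("the", "cat")
precision = matches / len(hyp)      # 2/3
recall = matches / len(ref)         # 2/3
gleu = min(precision, recall)       # 0.666...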
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
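Both implementations agree on small cases; a quick sketch:

assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8
assert gcd_by_iterative(-3, 9) == 3  # abs() keeps the result non-negative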
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter SCREAMING_SNAKE_CASE__ : Union[str, Any] = "Create a default config file for Accelerate with only a few flags set." def __magic_name__ ( __lowerCAmelCase : int="no" , __lowerCAmelCase : str = default_json_config_file , __lowerCAmelCase : bool = False ) -> int: __lowerCamelCase = Path(__lowerCAmelCase ) path.parent.mkdir(parents=__lowerCAmelCase , exist_ok=__lowerCAmelCase ) if path.exists(): print( f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False __lowerCamelCase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) __lowerCamelCase = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): __lowerCamelCase = torch.cuda.device_count() __lowerCamelCase = num_gpus __lowerCamelCase = False if num_gpus > 1: __lowerCamelCase = '''MULTI_GPU''' else: __lowerCamelCase = '''NO''' elif is_xpu_available() and use_xpu: __lowerCamelCase = torch.xpu.device_count() __lowerCamelCase = num_xpus __lowerCamelCase = False if num_xpus > 1: __lowerCamelCase = '''MULTI_XPU''' else: __lowerCamelCase = '''NO''' elif is_npu_available(): __lowerCamelCase = torch.npu.device_count() __lowerCamelCase = num_npus __lowerCamelCase = False if num_npus > 1: __lowerCamelCase = '''MULTI_NPU''' else: __lowerCamelCase = '''NO''' else: __lowerCamelCase = 0 __lowerCamelCase = True __lowerCamelCase = 1 __lowerCamelCase = '''NO''' __lowerCamelCase = ClusterConfig(**__lowerCAmelCase ) config.to_json_file(__lowerCAmelCase ) return path def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] ) -> Optional[int]: __lowerCamelCase = parser.add_parser('''default''' , parents=__lowerCAmelCase , help=__lowerCAmelCase , formatter_class=__lowerCAmelCase ) parser.add_argument( '''--config_file''' , default=__lowerCAmelCase , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=__lowerCAmelCase , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. 
''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=__lowerCAmelCase ) return parser def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> List[str]: __lowerCamelCase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f'''accelerate configuration saved at {config_file}''' )
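A sketch of calling this helper programmatically rather than via the CLI. The re-export path below is how recent accelerate versions expose it; treat it as an assumption for older releases:

from accelerate.utils import write_basic_config  # assumed public re-export

write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default.json")
# Writes a LOCAL_MACHINE ClusterConfig JSON, choosing MULTI_GPU / MULTI_XPU /
# MULTI_NPU / NO from the detected hardware, as in the function above.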
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
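For intuition, shift_tokens_right builds the decoder inputs from the labels: the decoder-start token is prepended and the last label is dropped. A toy sketch with made-up ids (pad and decoder-start both 0, as in T5-style configs):

import numpy as np

labels = np.array([[10, 11, 12]])
# shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0)
# -> [[0, 10, 11]]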
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters plus one start-of-sequence token
        n_positions=32 * 32,  # images are 32 x 32
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
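Because of attribute_map, the GPT-2-style names and the generic config names alias each other. A short sketch against the defaults above:

config = ImageGPTConfig()
assert config.hidden_size == config.n_embd == 512  # attribute_map aliasing
assert config.vocab_size == 513  # 512 clusters + 1 start-of-sequence token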
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : 
Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval 
options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? 
Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. 
{}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() SCREAMING_SNAKE_CASE__ : List[Any] = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model SCREAMING_SNAKE_CASE__ : Optional[Any] = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.1_5}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names SCREAMING_SNAKE_CASE__ : int = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: SCREAMING_SNAKE_CASE__ : Optional[int] = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: SCREAMING_SNAKE_CASE__ : int = "allenai" def __magic_name__ ( __lowerCAmelCase : Optional[int] ) -> List[Any]: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} __lowerCamelCase = dict((re.sub(R'''@@$''' , '''''' , __lowerCAmelCase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , __lowerCAmelCase ), v) for k, v in d.items() ) __lowerCamelCase = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[f'''{k}</w>'''] __lowerCamelCase = d[k] # restore return da def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ) -> Optional[int]: # prep assert os.path.exists(__lowerCAmelCase ) os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) print(f'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models __lowerCamelCase = basename(__lowerCAmelCase ) __lowerCamelCase = dirname(__lowerCAmelCase ) __lowerCamelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel __lowerCamelCase = cls.hub_models() __lowerCamelCase = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''} __lowerCamelCase = '''.''' # note: since the model dump is old, fairseq has upgraded its model some # time later, 
and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. # see: upgrade_state_dict(state_dict) in fairseq_model.py print(f'''using checkpoint {checkpoint_file}''' ) __lowerCamelCase = hub_utils.from_pretrained( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , archive_map=__lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = vars(chkpt['''args''']['''model'''] ) __lowerCamelCase = args['''source_lang'''] __lowerCamelCase = args['''target_lang'''] __lowerCamelCase = dirname(__lowerCAmelCase ) __lowerCamelCase = basename(__lowerCAmelCase ) # dicts __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''dict.{src_lang}.txt''' ) __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''dict.{tgt_lang}.txt''' ) __lowerCamelCase = Dictionary.load(__lowerCAmelCase ) __lowerCamelCase = rewrite_dict_keys(src_dict.indices ) __lowerCamelCase = len(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , '''vocab-src.json''' ) print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab __lowerCamelCase = True for k in src_vocab.keys(): if not k.islower(): __lowerCamelCase = False break __lowerCamelCase = Dictionary.load(__lowerCAmelCase ) __lowerCamelCase = rewrite_dict_keys(tgt_dict.indices ) __lowerCamelCase = len(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , '''vocab-tgt.json''' ) print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) ) # merges_file (bpecodes) __lowerCamelCase = os.path.join(__lowerCAmelCase , VOCAB_FILES_NAMES['''merges_file'''] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if os.path.exists(__lowerCAmelCase ): break with open(__lowerCAmelCase , encoding='''utf-8''' ) as fin: __lowerCamelCase = fin.read() __lowerCamelCase = re.sub(R''' \d+$''' , '''''' , __lowerCAmelCase , 0 , re.M ) # remove frequency number print(f'''Generating {merges_file}''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as fout: fout.write(__lowerCAmelCase ) # model config __lowerCamelCase = os.path.join(__lowerCAmelCase , '''config.json''' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}''' assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}''' __lowerCamelCase = { '''architectures''': ['''FSMTForConditionalGeneration'''], '''model_type''': '''fsmt''', '''activation_dropout''': args['''activation_dropout'''], '''activation_function''': '''relu''', '''attention_dropout''': args['''attention_dropout'''], '''d_model''': args['''decoder_embed_dim'''], '''dropout''': args['''dropout'''], '''init_std''': 0.02, '''max_position_embeddings''': args['''max_source_positions'''], 
'''num_hidden_layers''': args['''encoder_layers'''], '''src_vocab_size''': src_vocab_size, '''tgt_vocab_size''': tgt_vocab_size, '''langs''': [src_lang, tgt_lang], '''encoder_attention_heads''': args['''encoder_attention_heads'''], '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''], '''encoder_layerdrop''': args['''encoder_layerdrop'''], '''encoder_layers''': args['''encoder_layers'''], '''decoder_attention_heads''': args['''decoder_attention_heads'''], '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''], '''decoder_layerdrop''': args['''decoder_layerdrop'''], '''decoder_layers''': args['''decoder_layers'''], '''bos_token_id''': 0, '''pad_token_id''': 1, '''eos_token_id''': 2, '''is_encoder_decoder''': True, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_all_embeddings'''], } # good hparam defaults to start with __lowerCamelCase = 5 __lowerCamelCase = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: __lowerCamelCase = best_score_hparams[model_dir]['''length_penalty'''] else: __lowerCamelCase = 1.0 print(f'''Generating {fsmt_model_config_file}''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) ) # tokenizer config __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = { '''langs''': [src_lang, tgt_lang], '''model_max_length''': 1024, '''do_lower_case''': do_lower_case, } print(f'''Generating {fsmt_tokenizer_config_file}''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(__lowerCAmelCase , ensure_ascii=__lowerCAmelCase , indent=__lowerCAmelCase ) ) # model __lowerCamelCase = chkpt['''models'''][0] __lowerCamelCase = model.state_dict() # rename keys to start with 'model.' __lowerCamelCase = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys __lowerCamelCase = [ '''model.model''', '''model.encoder.version''', '''model.decoder.version''', '''model.encoder_embed_tokens.weight''', '''model.decoder_embed_tokens.weight''', '''model.encoder.embed_positions._float_tensor''', '''model.decoder.embed_positions._float_tensor''', ] for k in ignore_keys: model_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = FSMTConfig.from_pretrained(__lowerCAmelCase ) __lowerCamelCase = FSMTForConditionalGeneration(__lowerCAmelCase ) # check that it loads ok model_new.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) # save __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) print(f'''Generating {pytorch_weights_dump_path}''' ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) print('''Conversion is done!''' ) print('''\nLast step is to upload the files to s3''' ) print(f'''cd {data_root}''' ) print(f'''transformers-cli upload {model_dir}''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
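# --- Illustrative sketch (not part of the conversion script above) ---
# The dict-key rewrite performed by the first helper boils down to: drop the
# trailing "@@" BPE continuation marker, append "</w>" to word-final pieces, and
# restore the special tokens unchanged. A minimal, self-contained version of that
# transform, exercised on a made-up toy vocab (no fairseq checkpoint needed):
import re

def rewrite_bpe_keys_sketch(d: dict) -> dict:
    # drop "@@" from continuation pieces; mark word-final pieces with "</w>"
    out = {(re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v for k, v in d.items()}
    # special tokens must keep their original spelling, without "</w>"
    for tok in "<s> <pad> </s> <unk>".split():
        if tok + "</w>" in out:
            out[tok] = out.pop(tok + "</w>")
    return out

assert rewrite_bpe_keys_sketch({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0}) == {
    "le": 5,
    "tt": 6,
    "er</w>": 7,
    "<s>": 0,
}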
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == 
"multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
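# --- Illustrative sketch (standard library only) ---
# The ONNX config above returns one dynamic-axes mapping per model input; only the
# multiple-choice task gets the extra "choice" axis. The same branching, reduced
# to a plain function so the mapping can be inspected without transformers:
from collections import OrderedDict

def onnx_input_axes_sketch(task: str) -> OrderedDict:
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

assert onnx_input_axes_sketch("multiple-choice")["input_ids"][1] == "choice"
assert onnx_input_axes_sketch("sequence-classification")["attention_mask"][1] == "sequence"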
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_length and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extend to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ ,
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones.
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
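# --- Illustrative sketch (a toy stand-in, not the UMT5 attention stack) ---
# The cached-decoding test above checks one invariant: a single pass over the full
# sequence and an incremental pass that reuses a cache must agree on the newest
# position. A cumulative-sum "model" makes that invariant concrete:
import torch

def full_pass(x):  # per-position states for the whole sequence at once
    return torch.cumsum(x, dim=-1)

def cached_step(x_new, cache):  # one new token plus the running cache
    state = cache + x_new
    return state, state  # (output for the new position, updated cache)

x = torch.randn(2, 5)
out_full = full_pass(x)
cache = torch.zeros(2)
for t in range(x.shape[1]):
    out_step, cache = cached_step(x[:, t], cache)
assert torch.allclose(out_step, out_full[:, -1], atol=1e-6)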
from collections import namedtuple import requests from lxml import html # type: ignore SCREAMING_SNAKE_CASE__ : List[Any] = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: __lowerCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__lowerCAmelCase ).content ).xpath(__lowerCAmelCase ) ) SCREAMING_SNAKE_CASE__ : List[str] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
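# --- Illustrative sketch (offline; the markup below is made up) ---
# The scraper above relies on two pieces: an XPath that collects the three counter
# spans in page order, and star-unpacking that list into the namedtuple. Both can
# be exercised against a canned HTML snippet, with no network access:
from collections import namedtuple
from lxml import html

covid_data_sketch = namedtuple("covid_data_sketch", "cases deaths recovered")
snippet = """
<div class="maincounter-number"><span>1,000</span></div>
<div class="maincounter-number"><span>50</span></div>
<div class="maincounter-number"><span>900</span></div>
"""
values = html.fromstring(snippet).xpath('//div[@class = "maincounter-number"]/span/text()')
assert covid_data_sketch(*values).deaths == "50"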
import os import sys import unittest SCREAMING_SNAKE_CASE__ : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path SCREAMING_SNAKE_CASE__ : Any = os.path.join(git_repo_path, "src", "diffusers") class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Dict ) -> str: __lowerCamelCase = find_backend(''' if not is_torch_available():''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __lowerCamelCase = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __lowerCamelCase = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , '''torch_and_transformers_and_onnx''' ) def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , SCREAMING_SNAKE_CASE__ ) self.assertIn('''torch_and_transformers''' , SCREAMING_SNAKE_CASE__ ) self.assertIn('''flax_and_transformers''' , SCREAMING_SNAKE_CASE__ ) self.assertIn('''torch_and_transformers_and_onnx''' , SCREAMING_SNAKE_CASE__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def __A ( self : Tuple ) -> List[Any]: __lowerCamelCase = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , '''\nCONSTANT = None\n''' ) __lowerCamelCase = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( SCREAMING_SNAKE_CASE__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __lowerCamelCase = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' __lowerCamelCase = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' __lowerCamelCase = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , SCREAMING_SNAKE_CASE__ )
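# --- Illustrative sketch (an approximation; the real helper may differ) ---
# find_backend, as exercised above, effectively pulls every backend name out of an
# `if not is_xxx_available():` guard and joins multiple names with "_and_". A
# small regex-based reconstruction of that behaviour:
import re

def find_backend_sketch(line: str):
    backends = re.findall(r"is_(\w+)_available", line)
    return "_and_".join(backends) if backends else None

assert find_backend_sketch(" if not is_torch_available():") == "torch"
assert (
    find_backend_sketch(" if not (is_torch_available() and is_transformers_available()):")
    == "torch_and_transformers"
)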
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ : Optional[str] = field( default=__lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the encoder."""} ) a__ : bool = field(default=__lowercase , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowerCAmelCase__ : a__ : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ : Optional[int] = field( default=1_024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples.
-1 means use all."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Source language id for translation."""} ) a__ : Optional[str] = field(default=__lowercase , metadata={"""help""": """Target language id for translation."""} ) a__ : Optional[int] = field(default=__lowercase , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ : bool = field( default=__lowercase , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int ) -> Dict: logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , f'''{split}_results.json''' ) ) def __magic_name__ ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowerCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = parser.parse_args_into_dataclasses() check_output_dir(__lowerCAmelCase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('''Training/evaluation parameters %s''' , __lowerCAmelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCamelCase = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): assert hasattr(__lowerCAmelCase , __lowerCAmelCase ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) ) __lowerCamelCase = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__lowerCAmelCase , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__lowerCAmelCase , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __lowerCamelCase = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__lowerCAmelCase , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__lowerCAmelCase , __lowerCAmelCase ): __lowerCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __lowerCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__lowerCAmelCase ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __lowerCamelCase = SeqaSeqDataset # Get datasets __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_train else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __lowerCamelCase = ( dataset_class( __lowerCAmelCase , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , ) if training_args.do_predict else None ) # Initialize our Trainer __lowerCamelCase = ( build_compute_metrics_fn(data_args.task , __lowerCAmelCase ) if training_args.predict_with_generate else None ) __lowerCamelCase = SeqaSeqTrainer( model=__lowerCAmelCase , args=__lowerCAmelCase , data_args=__lowerCAmelCase , train_dataset=__lowerCAmelCase , eval_dataset=__lowerCAmelCase , data_collator=SeqaSeqDataCollator( __lowerCAmelCase , __lowerCAmelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__lowerCAmelCase , tokenizer=__lowerCAmelCase , ) __lowerCamelCase = {} # Training if training_args.do_train: logger.info('''*** Train ***''' ) __lowerCamelCase = trainer.train( 
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __lowerCamelCase = train_result.metrics __lowerCamelCase = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('''train''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowerCamelCase = trainer.evaluate(metric_key_prefix='''val''' ) __lowerCamelCase = data_args.n_val __lowerCamelCase = round(metrics['''val_loss'''] , 4 ) if trainer.is_world_process_zero(): handle_metrics('''val''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) __lowerCamelCase = trainer.predict(test_dataset=__lowerCAmelCase , metric_key_prefix='''test''' ) __lowerCamelCase = test_output.metrics __lowerCamelCase = data_args.n_test if trainer.is_world_process_zero(): __lowerCamelCase = round(metrics['''test_loss'''] , 4 ) handle_metrics('''test''' , __lowerCAmelCase , training_args.output_dir ) all_metrics.update(__lowerCAmelCase ) if training_args.predict_with_generate: __lowerCamelCase = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase ) __lowerCamelCase = lmap(str.strip , __lowerCAmelCase ) write_txt_file(__lowerCAmelCase , os.path.join(training_args.output_dir , '''test_generations.txt''' ) ) if trainer.is_world_process_zero(): save_json(__lowerCAmelCase , os.path.join(training_args.output_dir , '''all_results.json''' ) ) return all_metrics def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
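# --- Illustrative sketch (standard library only; field names are examples) ---
# The script above drives everything from dataclass-declared arguments. The core
# idea, reduced to argparse: each dataclass field becomes a typed CLI flag, and
# parsing returns a typed object instead of a bare namespace:
import argparse
from dataclasses import dataclass, fields

@dataclass
class DataArgsSketch:
    data_dir: str = "."
    max_source_length: int = 1024
    n_train: int = -1  # -1 means use all examples

def parse_into_dataclass(cls, argv):
    parser = argparse.ArgumentParser()
    for f in fields(cls):
        parser.add_argument(f"--{f.name}", type=f.type, default=f.default)
    return cls(**vars(parser.parse_args(argv)))

args_sketch = parse_into_dataclass(DataArgsSketch, ["--max_source_length", "128"])
assert args_sketch.max_source_length == 128 and args_sketch.n_train == -1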
import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline SCREAMING_SNAKE_CASE__ : Optional[Any] = { "n_samples": 64, "horizon": 32, "num_inference_steps": 20, "n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network "scale_grad_by_std": True, "scale": 0.1, "eta": 0.0, "t_grad_cutoff": 2, "device": "cpu", } if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "hopper-medium-v2" SCREAMING_SNAKE_CASE__ : Union[str, Any] = gym.make(env_name) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ValueGuidedRLPipeline.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", env=env, ) env.seed(0) SCREAMING_SNAKE_CASE__ : Optional[int] = env.reset() SCREAMING_SNAKE_CASE__ : Optional[Any] = 0 SCREAMING_SNAKE_CASE__ : List[Any] = 0 SCREAMING_SNAKE_CASE__ : int = 1_000 SCREAMING_SNAKE_CASE__ : List[str] = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy SCREAMING_SNAKE_CASE__ : List[str] = pipeline(obs, planning_horizon=32) # execute action in environment SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = env.step(denorm_actions) SCREAMING_SNAKE_CASE__ : List[Any] = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:' F' {total_score}' ) # save observations for rendering rollout.append(next_observation.copy()) SCREAMING_SNAKE_CASE__ : List[str] = next_observation except KeyboardInterrupt: pass print(F'Total reward: {total_reward}')
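# --- Illustrative sketch (toy dynamics; stands in for env.step and the planner) ---
# The evaluation loop above is the classic plan -> act -> accumulate pattern. With
# the diffusion planner and the MuJoCo environment swapped for one-line stand-ins,
# the control flow reduces to this:
def toy_env_step(obs, action):
    next_obs = obs + action
    return next_obs, -abs(next_obs), abs(next_obs) < 1e-3  # (obs, reward, done)

obs, total_reward = 10.0, 0.0
for t in range(100):
    action = -0.5 * obs  # proportional controller in place of the guided planner
    obs, reward, done = toy_env_step(obs, action)
    total_reward += reward
    if done:
        break
print(f"Total reward: {total_reward}")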
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCAmelCase__ ( unittest.TestCase ): @property def __A ( self : List[Any] ) -> Optional[Any]: torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = ScoreSdeVeScheduler() __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )[ 0 ] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Tuple ) -> str: __lowerCamelCase = '''google/ncsnpp-church-256''' __lowerCamelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVeScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = ScoreSdeVePipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) sde_ve.to(SCREAMING_SNAKE_CASE__ ) sde_ve.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=SCREAMING_SNAKE_CASE__ ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) __lowerCamelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
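# --- Illustrative sketch (model-free) ---
# Both tests above rest on the same determinism contract: re-seeding the torch
# generator must reproduce the sampled tensor exactly, and a small corner slice is
# enough to compare against a stored reference. The contract in isolation:
import numpy as np
import torch

generator = torch.manual_seed(0)
image_a = torch.randn(1, 32, 32, 3, generator=generator).numpy()
generator = torch.manual_seed(0)
image_b = torch.randn(1, 32, 32, 3, generator=generator).numpy()

image_slice = image_a[0, -3:, -3:, -1]
expected_slice = image_b[0, -3:, -3:, -1]
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-6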
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Any ) -> List[Any]: # load base model __lowerCamelCase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __lowerCamelCase = load_file(__lowerCAmelCase ) __lowerCamelCase = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __lowerCamelCase = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) __lowerCamelCase = pipeline.text_encoder else: __lowerCamelCase = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) __lowerCamelCase = pipeline.unet # find the target layer __lowerCamelCase = layer_infos.pop(0 ) while len(__lowerCAmelCase ) > -1: try: __lowerCamelCase = curr_layer.__getattr__(__lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: __lowerCamelCase = layer_infos.pop(0 ) elif len(__lowerCAmelCase ) == 0: break except Exception: if len(__lowerCAmelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __lowerCamelCase = layer_infos.pop(0 ) __lowerCamelCase = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(__lowerCAmelCase ) else: pair_keys.append(__lowerCAmelCase ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __lowerCamelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __lowerCamelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase ).unsqueeze(2 ).unsqueeze(3 ) else: __lowerCamelCase = state_dict[pair_keys[0]].to(torch.floataa ) __lowerCamelCase = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(__lowerCAmelCase , __lowerCAmelCase ) # update visited list for item in pair_keys: visited.append(__lowerCAmelCase ) return pipeline if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.7_5, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)") SCREAMING_SNAKE_CASE__ : str = parser.parse_args() SCREAMING_SNAKE_CASE__ : Dict = args.base_model_path SCREAMING_SNAKE_CASE__ : Optional[Any] = args.checkpoint_path SCREAMING_SNAKE_CASE__ : Any = args.dump_path SCREAMING_SNAKE_CASE__ : Optional[Any] = args.lora_prefix_unet SCREAMING_SNAKE_CASE__ : Union[str, Any] = args.lora_prefix_text_encoder SCREAMING_SNAKE_CASE__ : Optional[int] = args.alpha SCREAMING_SNAKE_CASE__ : Optional[int] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) SCREAMING_SNAKE_CASE__ : List[str] = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
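# --- Illustrative sketch (random tensors; rank and sizes are arbitrary) ---
# The per-layer update applied above is the standard LoRA merge,
# W <- W0 + alpha * (up @ down), where `up` and `down` are the low-rank factors
# stored under "lora_up" / "lora_down". The shape and rank algebra in isolation:
import torch

d_out, d_in, rank, alpha = 16, 8, 4, 0.75
W0 = torch.randn(d_out, d_in)
up = torch.randn(d_out, rank)    # "lora_up" factor
down = torch.randn(rank, d_in)   # "lora_down" factor

W = W0 + alpha * torch.mm(up, down)
assert W.shape == W0.shape
assert torch.linalg.matrix_rank(W - W0) <= rank  # the merged delta is low-rank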
from functools import lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> set: __lowerCamelCase = 2 __lowerCamelCase = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__lowerCAmelCase ) if n > 1: factors.add(__lowerCAmelCase ) return factors @lru_cache def __magic_name__ ( __lowerCAmelCase : int ) -> int: return len(unique_prime_factors(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : list ) -> bool: return len(set(__lowerCAmelCase ) ) in (0, 1) def __magic_name__ ( __lowerCAmelCase : int ) -> list: __lowerCamelCase = 2 while True: # Increment each value of a generated range __lowerCamelCase = [base + i for i in range(__lowerCAmelCase )] # Run elements through our unique_prime_factors function # Append our target number to the end. __lowerCamelCase = [upf_len(__lowerCAmelCase ) for x in group] checker.append(__lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(__lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def __magic_name__ ( __lowerCAmelCase : int = 4 ) -> int: __lowerCamelCase = run(__lowerCAmelCase ) return results[0] if len(__lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
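# --- Worked example (Project Euler 47) ---
# With readable names, the factoriser above is trial division that records each
# distinct prime; 644 = 2^2 * 7 * 23 gives {2, 7, 23}, and 644, 645, 646 is the
# first run of three consecutive integers with three distinct prime factors each:
def distinct_prime_factors_sketch(n: int) -> set:
    i, found = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            found.add(i)
    if n > 1:
        found.add(n)  # whatever remains is prime
    return found

assert distinct_prime_factors_sketch(644) == {2, 7, 23}
assert all(len(distinct_prime_factors_sketch(644 + k)) == 3 for k in range(3))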
import sys SCREAMING_SNAKE_CASE__ : Any = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def __magic_name__ ( __lowerCAmelCase : str = N ) -> int: __lowerCamelCase = -sys.maxsize - 1 for i in range(len(__lowerCAmelCase ) - 12 ): __lowerCamelCase = 1 for j in range(13 ): product *= int(n[i + j] ) if product > largest_product: __lowerCamelCase = product return largest_product if __name__ == "__main__": print(F'{solution() = }')
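# --- Illustrative sketch (toy input; the 1000-digit constant stays above) ---
# The search above is a plain sliding-window maximum over digit products. The same
# idea on a short string with a width of 4 instead of 13, small enough to verify
# by hand:
from math import prod

def largest_window_product(digits: str, width: int) -> int:
    return max(
        prod(int(c) for c in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )

assert largest_window_product("7316717653", 4) == 7 * 6 * 5 * 3  # the "7653" window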
339
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create a hypothetical next token and extend to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ ,
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
339
1
from __future__ import annotations from typing import TypedDict class lowerCAmelCase__ ( __lowercase ): a__ : str a__ : int def __magic_name__ ( __lowerCAmelCase : str ) -> list[str]: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(__lowerCAmelCase ) )] def __magic_name__ ( __lowerCAmelCase : str ) -> BWTTransformDict: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) __lowerCamelCase = all_rotations(__lowerCAmelCase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation __lowerCamelCase = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__lowerCAmelCase ), } return response def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ) -> str: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: __lowerCamelCase = int(__lowerCAmelCase ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or''' ''' castable to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(__lowerCAmelCase ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) __lowerCamelCase = [''''''] * len(__lowerCAmelCase ) for _ in range(len(__lowerCAmelCase ) ): for i in range(len(__lowerCAmelCase ) ): __lowerCamelCase = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = "Provide a string that I will generate its BWT transform: " SCREAMING_SNAKE_CASE__ : str = input(entry_msg).strip() SCREAMING_SNAKE_CASE__ : List[str] = bwt_transform(s) print( F'Burrows Wheeler transform for string \'{s}\' results ' F'in \'{result["bwt_string"]}\'' ) SCREAMING_SNAKE_CASE__ : int = reverse_bwt(result["bwt_string"], result["idx_original_string"]) print( F'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' ' F'we get original string \'{original_string}\'' )
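For reference, a minimal runnable sketch of the Burrows-Wheeler round trip implemented above, with descriptive names substituted for the sample's obfuscated identifiers (bwt and inverse_bwt here correspond to the sample's transform and reverse functions):

def bwt(s: str) -> tuple[str, int]:
    # Sort all rotations and keep each rotation's last character.
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(r[-1] for r in rotations), rotations.index(s)

def inverse_bwt(last_column: str, idx: int) -> str:
    # Repeatedly prepend the last column and re-sort to rebuild the rotation table.
    table = [""] * len(last_column)
    for _ in range(len(last_column)):
        table = sorted(last_column[i] + table[i] for i in range(len(last_column)))
    return table[idx]

encoded, index = bwt("banana")            # ("nnbaaa", 3)
assert inverse_bwt(encoded, index) == "banana"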
339
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Union[str, Any] = """open-llama""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Any=10_00_00 , SCREAMING_SNAKE_CASE__ : Any=40_96 , SCREAMING_SNAKE_CASE__ : Any=1_10_08 , SCREAMING_SNAKE_CASE__ : Tuple=32 , SCREAMING_SNAKE_CASE__ : str=32 , SCREAMING_SNAKE_CASE__ : Any="silu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=20_48 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1e-6 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Dict: __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = intermediate_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = initializer_range __lowerCamelCase = rms_norm_eps __lowerCamelCase = use_cache __lowerCamelCase = kwargs.pop( '''use_memorry_efficient_attention''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_dropout_prob __lowerCamelCase = use_stable_embedding __lowerCamelCase = shared_input_output_embedding __lowerCamelCase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , SCREAMING_SNAKE_CASE__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ''' f'''got {self.rope_scaling}''' ) __lowerCamelCase = self.rope_scaling.get('''type''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.rope_scaling.get('''factor''' , SCREAMING_SNAKE_CASE__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
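A hedged usage sketch of the validator above: rope_scaling must be a two-field dict whose type is 'linear' or 'dynamic' and whose factor is a float greater than 1. The class name OpenLlamaConfig and the rope_scaling keyword are assumptions here (the sample binds the class and parameters to obfuscated names; the method body reads the original names):

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
# OpenLlamaConfig(rope_scaling={"type": "static", "factor": 2.0})   -> ValueError (unknown type)
# OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})   -> ValueError (factor must be > 1.0)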
339
1
from sklearn.metrics import mean_squared_error import datasets SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" SCREAMING_SNAKE_CASE__ : List[str] = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n" SCREAMING_SNAKE_CASE__ : Tuple = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. 
])}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __A ( self : Union[str, Any] ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def __A ( self : Tuple ) -> Any: if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : List[str]="uniform_average" , SCREAMING_SNAKE_CASE__ : Dict=True ) -> Union[str, Any]: __lowerCamelCase = mean_squared_error( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , sample_weight=SCREAMING_SNAKE_CASE__ , multioutput=SCREAMING_SNAKE_CASE__ , squared=SCREAMING_SNAKE_CASE__ ) return {"mse": mse}
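The docstring values above can be reproduced directly with scikit-learn, independent of the datasets metric wrapper:

from sklearn.metrics import mean_squared_error

references = [3, -0.5, 2, 7]
predictions = [2.5, 0.0, 2, 8]
print(mean_squared_error(references, predictions))                  # 0.375
print(mean_squared_error(references, predictions, squared=False))  # 0.6123724356957945 (RMSE)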
339
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ : Any = TypeVar("KEY") SCREAMING_SNAKE_CASE__ : Dict = TypeVar("VAL") @dataclass(frozen=__lowercase , slots=__lowercase ) class lowerCAmelCase__ ( Generic[KEY, VAL] ): a__ : KEY a__ : VAL class lowerCAmelCase__ ( _Item ): def __init__( self : str ) -> None: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __bool__( self : Tuple ) -> bool: return False SCREAMING_SNAKE_CASE__ : List[Any] = _DeletedItem() class lowerCAmelCase__ ( MutableMapping[KEY, VAL] ): def __init__( self : int , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : float = 0.75 ) -> None: __lowerCamelCase = initial_block_size __lowerCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __lowerCamelCase = capacity_factor __lowerCamelCase = 0 def __A ( self : Any , SCREAMING_SNAKE_CASE__ : KEY ) -> int: return hash(SCREAMING_SNAKE_CASE__ ) % len(self._buckets ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : int ) -> int: return (ind + 1) % len(self._buckets ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> bool: __lowerCamelCase = self._buckets[ind] if not stored: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self._len += 1 return True elif stored.key == key: __lowerCamelCase = _Item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return True else: return False def __A ( self : Any ) -> bool: __lowerCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(SCREAMING_SNAKE_CASE__ ) def __A ( self : List[Any] ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False __lowerCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __A ( self : int , SCREAMING_SNAKE_CASE__ : int ) -> None: __lowerCamelCase = self._buckets __lowerCamelCase = [None] * new_size __lowerCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __A ( self : str ) -> None: self._resize(len(self._buckets ) * 2 ) def __A ( self : Dict ) -> None: self._resize(len(self._buckets ) // 2 ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY ) -> Iterator[int]: __lowerCamelCase = self._get_bucket_index(SCREAMING_SNAKE_CASE__ ) for _ in range(len(self._buckets ) ): yield ind __lowerCamelCase = self._get_next_ind(SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): if self._try_set(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): break def __setitem__( self : Tuple , SCREAMING_SNAKE_CASE__ : KEY , SCREAMING_SNAKE_CASE__ : VAL ) -> None: if self._is_full(): self._size_up() self._add_item(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __delitem__( self : List[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> None: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: raise KeyError(SCREAMING_SNAKE_CASE__ ) if item is _deleted: continue if item.key == key: __lowerCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : KEY ) -> VAL: for ind in self._iterate_buckets(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = self._buckets[ind] if item is None: break if 
item is _deleted: continue if item.key == key: return item.val raise KeyError(SCREAMING_SNAKE_CASE__ ) def __len__( self : int ) -> int: return self._len def __iter__( self : Tuple ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self : Optional[Any] ) -> str: __lowerCamelCase = ''' ,'''.join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
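Because the class above implements MutableMapping, it behaves like a dict backed by open addressing with tombstone deletion; a usage sketch, assuming the map class is bound to the readable name HashMap:

hm = HashMap()            # defaults: 8 buckets, resize at 75% load
hm["alpha"] = 1
hm["beta"] = 2
assert hm["alpha"] == 1 and len(hm) == 2
del hm["alpha"]           # the slot is tombstoned with the _deleted sentinel, not cleared
assert "alpha" not in hm  # __contains__ falls out of MutableMapping via __getitem__
assert sorted(hm) == ["beta"]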
339
1
def __magic_name__ ( __lowerCAmelCase : list[int] , __lowerCAmelCase : str ) -> list[int]: __lowerCamelCase = int(__lowerCAmelCase ) # Initialize Result __lowerCamelCase = [] # Traverse through all denominations for denomination in reversed(__lowerCAmelCase ): # Find denominations while int(__lowerCAmelCase ) >= int(__lowerCAmelCase ): total_value -= int(__lowerCAmelCase ) answer.append(__lowerCAmelCase ) # Append the denomination to the answer return answer # Driver Code if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): SCREAMING_SNAKE_CASE__ : List[str] = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F'Denomination {i}: ').strip())) SCREAMING_SNAKE_CASE__ : Union[str, Any] = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter SCREAMING_SNAKE_CASE__ : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] SCREAMING_SNAKE_CASE__ : Optional[Any] = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F'Following is minimal change for {value}: ') SCREAMING_SNAKE_CASE__ : Union[str, Any] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
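A worked sketch of the greedy strategy above, with descriptive names assumed in place of the obfuscated ones. Greedy change-making is optimal for canonical coin systems such as the hard-coded INR denominations, but not for arbitrary coin sets:

def find_minimum_change(denominations: list[int], total: int) -> list[int]:
    answer = []
    for coin in sorted(denominations, reverse=True):  # largest coin first
        while total >= coin:
            total -= coin
            answer.append(coin)
    return answer

assert find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]
# Counter-example for a non-canonical system: greedy on [1, 3, 4] for 6 gives [4, 1, 1], not [3, 3].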
339
from datetime import datetime as dt import os from github import Github SCREAMING_SNAKE_CASE__ : Any = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def __magic_name__ ( ) -> Any: __lowerCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) __lowerCamelCase = g.get_repo('''huggingface/transformers''' ) __lowerCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: __lowerCamelCase = sorted([comment for comment in issue.get_comments()] , key=lambda __lowerCAmelCase : i.created_at , reverse=__lowerCAmelCase ) __lowerCamelCase = comments[0] if len(__lowerCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
339
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Union[str, Any] = ReformerTokenizer a__ : Optional[int] = ReformerTokenizerFast a__ : List[str] = True a__ : List[Any] = False a__ : str = True def __A ( self : Union[str, Any] ) -> List[str]: super().setUp() __lowerCamelCase = ReformerTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : str ) -> Tuple: __lowerCamelCase = '''<s>''' __lowerCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_00 ) def __A ( self : Union[str, Any] ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def __A ( self : Any ) -> List[Any]: if not self.test_rust_tokenizer: return __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = '''I was born in 92000, and this is falsé.''' __lowerCamelCase = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=15 ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __lowerCamelCase = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # Simple input __lowerCamelCase = '''This is a simple input''' __lowerCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] __lowerCamelCase = ('''This is a simple input''', '''This is a pair''') __lowerCamelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , 
max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE__ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE__ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding='''max_length''' , ) def __A ( self : Optional[int] ) -> int: pass def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = ReformerTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2_85, 46, 10, 1_70, 3_82] , ) __lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) __lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __A ( self : Tuple ) -> str: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = '''Hello World!''' __lowerCamelCase = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) @slow def __A ( self : Union[str, Any] ) -> List[Any]: __lowerCamelCase = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) __lowerCamelCase = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) @require_torch @slow def __A ( self : List[str] ) -> Union[str, Any]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence __lowerCamelCase = list(self.big_tokenizer.get_vocab().keys() )[:10] __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) __lowerCamelCase = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) __lowerCamelCase = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) __lowerCamelCase = encoded_sequence['''input_ids'''].shape __lowerCamelCase = ReformerModel(SCREAMING_SNAKE_CASE__ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**SCREAMING_SNAKE_CASE__ ) model(**SCREAMING_SNAKE_CASE__ ) @slow def __A ( self : Union[str, Any] ) -> Union[str, Any]: # fmt: off __lowerCamelCase = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 __lowerCamelCase = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=SCREAMING_SNAKE_CASE__ , sequences=SCREAMING_SNAKE_CASE__ , )
339
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> str: if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = str(bin(__lowerCAmelCase ) )[2:] # remove the leading "0b" __lowerCamelCase = max(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) ) return "0b" + "".join( str(int(char_a == '''1''' and char_b == '''1''' ) ) for char_a, char_b in zip(a_binary.zfill(__lowerCAmelCase ) , b_binary.zfill(__lowerCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
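A worked example of the string-based AND above: both operands are zero-filled to the longer binary width before the digit-wise comparison, so the result keeps that width.

# bin(25) -> '11001', bin(32) -> '100000'; zero-filled to 6 digits: '011001' vs '100000'
# -> no position has '1' in both operands, so the function returns '0b000000'.
assert 25 & 32 == 0                                  # the arithmetic equivalent
assert format(25 & 32, "b").zfill(6) == "000000"     # the same digits without the '0b' prefix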
339
1
import math def __magic_name__ ( __lowerCAmelCase : int ) -> list[int]: __lowerCamelCase = [] __lowerCamelCase = 2 __lowerCamelCase = int(math.sqrt(__lowerCAmelCase ) ) # Size of every segment __lowerCamelCase = [True] * (end + 1) __lowerCamelCase = [] while start <= end: if temp[start] is True: in_prime.append(__lowerCAmelCase ) for i in range(start * start , end + 1 , __lowerCAmelCase ): __lowerCamelCase = False start += 1 prime += in_prime __lowerCamelCase = end + 1 __lowerCamelCase = min(2 * end , __lowerCAmelCase ) while low <= n: __lowerCamelCase = [True] * (high - low + 1) for each in in_prime: __lowerCamelCase = math.floor(low / each ) * each if t < low: t += each for j in range(__lowerCAmelCase , high + 1 , __lowerCAmelCase ): __lowerCamelCase = False for j in range(len(__lowerCAmelCase ) ): if temp[j] is True: prime.append(j + low ) __lowerCamelCase = high + 1 __lowerCamelCase = min(high + end , __lowerCAmelCase ) return prime print(sieve(10**6))
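A quick cross-check for the segmented sieve above: its output should match a plain Sieve of Eratosthenes, sketched here with descriptive names.

def simple_sieve(n: int) -> list[int]:
    is_prime = [True] * (n + 1)
    primes = []
    for p in range(2, n + 1):
        if is_prime[p]:
            primes.append(p)
            for multiple in range(p * p, n + 1, p):  # strike out multiples of p
                is_prime[multiple] = False
    return primes

assert simple_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]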
339
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = 
CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
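For orientation, a minimal sketch of how the processor under test is used at inference time. The checkpoint name is an assumption; any CLIPSeg checkpoint with a saved processor would do:

import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))            # dummy input image
inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
# -> dict with input_ids, attention_mask, pixel_values, matching the keys asserted above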
339
1
from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder SCREAMING_SNAKE_CASE__ : Tuple = datasets.utils.logging.get_logger(__name__) class lowerCAmelCase__ ( folder_based_builder.FolderBasedBuilderConfig ): a__ : bool = None a__ : bool = None class lowerCAmelCase__ ( folder_based_builder.FolderBasedBuilder ): a__ : List[str] = datasets.Audio() a__ : Optional[Any] = """audio""" a__ : Union[str, Any] = AudioFolderConfig a__ : List[str] # definition at the bottom of the script a__ : Union[str, Any] = AudioClassification(audio_column="""audio""" , label_column="""label""" ) SCREAMING_SNAKE_CASE__ : List[Any] = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] SCREAMING_SNAKE_CASE__ : Union[str, Any] = AUDIO_EXTENSIONS
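In practice the builder above is reached through load_dataset; a hedged sketch (the directory path is a placeholder, and subdirectory names become class labels unless labels are dropped):

from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="/path/to/audio")  # placeholder path
# Files with any of the AUDIO_EXTENSIONS listed above are decoded into the "audio" column.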
339
from __future__ import annotations def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : int | None = None , __lowerCAmelCase : int | None = None ) -> None: if start is None: __lowerCamelCase = 0 if end is None: __lowerCamelCase = len(__lowerCAmelCase ) - 1 if start >= end: return __lowerCamelCase = (start + end) // 2 slowsort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) slowsort(__lowerCAmelCase , mid + 1 , __lowerCAmelCase ) if sequence[end] < sequence[mid]: __lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[end] slowsort(__lowerCAmelCase , __lowerCAmelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
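Slowsort is a deliberately pessimal "multiply and surrender" algorithm, so it is only practical on tiny inputs. A usage sketch of the in-place sort above, assuming the function is bound to the name slowsort (the name its own recursive calls reference):

data = [5, 2, 4, 1, 3]
slowsort(data)                 # recursively sorts both halves, then fixes the boundary pair
assert data == [1, 2, 3, 4, 5]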
339
1
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def __A ( self : Tuple ) -> List[Any]: __lowerCamelCase , __lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __lowerCamelCase = '''A painting of a squirrel eating a burger''' __lowerCamelCase = jax.device_count() __lowerCamelCase = num_samples * [prompt] __lowerCamelCase = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = replicate(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = shard(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = jax.random.PRNGKey(0 ) __lowerCamelCase = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() ) __lowerCamelCase = sd_pipe(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE__ )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) __lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] __lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowerCamelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def __A ( self : List[str] ) -> Tuple: __lowerCamelCase = '''stabilityai/stable-diffusion-2''' __lowerCamelCase , __lowerCamelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(SCREAMING_SNAKE_CASE__ , subfolder='''scheduler''' ) __lowerCamelCase , __lowerCamelCase = FlaxStableDiffusionPipeline.from_pretrained( SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , revision='''bf16''' , dtype=jnp.bfloataa , ) __lowerCamelCase = scheduler_params __lowerCamelCase = '''A painting of a squirrel eating a burger''' __lowerCamelCase = jax.device_count() __lowerCamelCase = num_samples * [prompt] __lowerCamelCase = sd_pipe.prepare_inputs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = replicate(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = shard(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = jax.random.PRNGKey(0 ) __lowerCamelCase = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() ) __lowerCamelCase = sd_pipe(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , num_inference_steps=25 , jit=SCREAMING_SNAKE_CASE__ )[0] assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3) __lowerCamelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowerCamelCase = images[0, 2_53:2_56, 2_53:2_56, -1] __lowerCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowerCamelCase = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , 
float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
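The module-level helper at the top of this sample collects adjacent symbol pairs for BPE merging; an equivalent sketch with a descriptive name:

def get_pairs(word: tuple[str, ...]) -> set[tuple[str, str]]:
    # Set of all adjacent (previous, current) symbol pairs in the word.
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}

assert get_pairs(tuple("lower")) == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}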
339
1
import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=13 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=99 , SCREAMING_SNAKE_CASE__ : Optional[Any]=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[Any]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=1_28 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=16 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : Dict=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Dict: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_input_mask __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope def __A ( self : List[Any] ) -> Union[str, Any]: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_input_mask: __lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : int ) -> Any: 
return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) def __A ( self : List[Any] ) -> Any: ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = self.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ) -> Tuple: __lowerCamelCase = NezhaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> List[Any]: __lowerCamelCase = True __lowerCamelCase = NezhaModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: __lowerCamelCase = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) 
model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ) -> Any: __lowerCamelCase = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: __lowerCamelCase = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , next_sentence_label=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str: __lowerCamelCase = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any: __lowerCamelCase = self.num_labels __lowerCamelCase = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str: __lowerCamelCase = self.num_labels 
__lowerCamelCase = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]: __lowerCamelCase = self.num_choices __lowerCamelCase = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCamelCase = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[int] ) -> Optional[Any]: __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : Tuple = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) a__ : Dict = ( { """feature-extraction""": NezhaModel, """fill-mask""": NezhaForMaskedLM, """question-answering""": NezhaForQuestionAnswering, """text-classification""": NezhaForSequenceClassification, """token-classification""": NezhaForTokenClassification, """zero-shot""": NezhaForSequenceClassification, } if is_torch_available() else {} ) a__ : int = True def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=False ) -> Any: __lowerCamelCase = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def __A ( self : List[Any] ) -> Any: __lowerCamelCase = NezhaModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __A ( self : Optional[Any] ) -> Tuple: self.config_tester.run_common_tests() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> Optional[Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> str: # This regression test was failing with PyTorch < 1.3 ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __lowerCamelCase = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) def __A ( self : List[Any] ) -> Dict: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Optional[Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] ) -> Dict: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> int: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __A ( self : Any ) -> Tuple: for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow @require_torch_gpu def __A ( self : List[Any] ) -> Union[str, Any]: __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __lowerCamelCase = True __lowerCamelCase = model_class(config=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.jit.trace( SCREAMING_SNAKE_CASE__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''bert.pt''' ) ) __lowerCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE__ , '''bert.pt''' ) , map_location=SCREAMING_SNAKE_CASE__ ) loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE__ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE__ ) ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowerCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowerCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0] __lowerCamelCase = torch.Size((1, 6, 7_68) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) ) @slow def __A ( self : Dict ) -> List[str]: __lowerCamelCase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) __lowerCamelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __lowerCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0] __lowerCamelCase = torch.Size((1, 6, 2_11_28) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
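# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the test suite above): it reproduces the
# forward pass that NezhaModelTester.create_and_check_model exercises, using a
# deliberately tiny NezhaConfig so it runs quickly on CPU. The config values
# here are illustrative assumptions, not the pretrained-checkpoint defaults.
import torch
from transformers import NezhaConfig, NezhaModel

config = NezhaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
)
model = NezhaModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch, seq_len)
attention_mask = torch.ones_like(input_ids)

with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)

# Shapes match the assertions made in the tester above.
assert outputs.last_hidden_state.shape == (2, 7, config.hidden_size)
assert outputs.pooler_output.shape == (2, config.hidden_size)
# ---------------------------------------------------------------------------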
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , 
output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
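# ---------------------------------------------------------------------------
# A standalone usage sketch for the pipeline exercised above. Upstream
# diffusers exposes this class as ShapEImg2ImgPipeline (the file above refers
# to the same class under a renamed alias). The sketch assumes a CUDA device
# and network access to the "openai/shap-e-img2img" checkpoint; the settings
# mirror the slow test above rather than being recommended defaults.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)

frames = pipe(
    image,
    generator=generator,
    guidance_scale=3.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images[0]
print(frames.shape)  # (20, 64, 64, 3): 20 rendered views of the 3D asset
# ---------------------------------------------------------------------------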
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE__ : Optional[Any] = 16 SCREAMING_SNAKE_CASE__ : Dict = 32 def __magic_name__ ( __lowerCAmelCase : Accelerator , __lowerCAmelCase : int = 16 ) -> Optional[Any]: __lowerCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __lowerCamelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(__lowerCAmelCase : Optional[int] ): # max_length=None => use the model max length (it's actually the default) __lowerCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCamelCase = datasets.map( __lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__lowerCAmelCase : Union[str, Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCamelCase = 16 elif accelerator.mixed_precision != "no": __lowerCamelCase = 8 else: __lowerCamelCase = None return tokenizer.pad( __lowerCAmelCase , padding='''longest''' , max_length=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__lowerCamelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) __lowerCamelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE__ : Dict = mocked_dataloaders # noqa: F811 def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Any ) -> List[Any]: # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCAmelCase ) == "1": __lowerCamelCase = 2 # New Code # __lowerCamelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator __lowerCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__lowerCAmelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCamelCase = config['''lr'''] __lowerCamelCase = int(config['''num_epochs'''] ) __lowerCamelCase = int(config['''seed'''] ) __lowerCamelCase = int(config['''batch_size'''] ) __lowerCamelCase = evaluate.load('''glue''' , '''mrpc''' ) set_seed(__lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCamelCase = model.to(accelerator.device ) # Instantiate optimizer __lowerCamelCase = AdamW(params=model.parameters() , lr=__lowerCAmelCase ) # Instantiate scheduler __lowerCamelCase = get_linear_schedule_with_warmup( optimizer=__lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(__lowerCAmelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = accelerator.prepare( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Now we train the model for epoch in range(__lowerCAmelCase ): model.train() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__lowerCAmelCase ): __lowerCamelCase = model(**__lowerCAmelCase ) __lowerCamelCase = output.loss accelerator.backward(__lowerCAmelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __lowerCamelCase = model(**__lowerCAmelCase ) __lowerCamelCase = outputs.logits.argmax(dim=-1 ) __lowerCamelCase , __lowerCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__lowerCAmelCase , references=__lowerCAmelCase , ) __lowerCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , __lowerCAmelCase ) def __magic_name__ ( ) -> int: __lowerCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__lowerCAmelCase , default=__lowerCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=__lowerCAmelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) __lowerCamelCase = parser.parse_args() __lowerCamelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
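# ---------------------------------------------------------------------------
# The essential pattern from training_function above, reduced to its core and
# made self-contained with a toy model and dataset. accelerator.accumulate()
# scales the loss and skips gradient synchronization until
# `gradient_accumulation_steps` batches have been processed; the prepared
# optimizer skips stepping on the non-sync iterations. All shapes and
# hyperparameters below are illustrative.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(64, 8), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=4)

model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for xb, yb in dataloader:
    with accelerator.accumulate(model):
        loss = torch.nn.functional.cross_entropy(model(xb), yb)
        accelerator.backward(loss)
        optimizer.step()       # no-op on non-sync iterations inside accumulate()
        optimizer.zero_grad()
# ---------------------------------------------------------------------------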
import glob import os import random from string import ascii_lowercase, digits import cva SCREAMING_SNAKE_CASE__ : str = "" SCREAMING_SNAKE_CASE__ : Any = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = "" SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def __magic_name__ ( ) -> None: __lowerCamelCase , __lowerCamelCase = get_dataset(__lowerCAmelCase , __lowerCAmelCase ) print('''Processing...''' ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for index, image in enumerate(__lowerCAmelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __lowerCamelCase = random_chars(32 ) __lowerCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __lowerCamelCase = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(__lowerCAmelCase )} with {file_name}''' ) __lowerCamelCase = [] for anno in new_annos[index]: __lowerCamelCase = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(__lowerCAmelCase ) with open(f'''/{file_root}.txt''' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]: __lowerCamelCase = [] __lowerCamelCase = [] for label_file in glob.glob(os.path.join(__lowerCAmelCase , '''*.txt''' ) ): __lowerCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(__lowerCAmelCase ) as in_file: __lowerCamelCase = in_file.readlines() __lowerCamelCase = os.path.join(__lowerCAmelCase , f'''{label_name}.jpg''' ) __lowerCamelCase = [] for obj_list in obj_lists: __lowerCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__lowerCAmelCase ) labels.append(__lowerCAmelCase ) return img_paths, labels def __magic_name__ ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int = 1 ) -> tuple[list, list, list]: __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = [] for idx in range(len(__lowerCAmelCase ) ): __lowerCamelCase = [] __lowerCamelCase = img_list[idx] path_list.append(__lowerCAmelCase ) __lowerCamelCase = anno_list[idx] __lowerCamelCase = cva.imread(__lowerCAmelCase ) if flip_type == 1: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __lowerCamelCase = cva.flip(__lowerCAmelCase , __lowerCAmelCase ) for bbox in img_annos: __lowerCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__lowerCAmelCase ) new_imgs_list.append(__lowerCAmelCase ) return new_imgs_list, new_annos_lists, path_list def __magic_name__ ( __lowerCAmelCase : int = 32 ) -> str: assert number_char > 1, "The number of character should greater than 1" __lowerCamelCase = ascii_lowercase + digits return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
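# ---------------------------------------------------------------------------
# The coordinate math from update_image_and_anno above, isolated: YOLO boxes
# are normalized (class, x_center, y_center, width, height), so a horizontal
# flip only needs x -> 1 - x and a vertical flip only y -> 1 - y, while width
# and height are unchanged. The box values below are toy inputs.
def flip_yolo_box(box, flip_type=1):
    cls, x, y, w, h = box
    if flip_type == 1:                 # horizontal (mirror left/right)
        return [cls, 1 - x, y, w, h]
    return [cls, x, 1 - y, w, h]       # vertical (mirror top/bottom)

print(flip_yolo_box([0, 0.25, 0.40, 0.10, 0.20]))  # -> [0, 0.75, 0.4, 0.1, 0.2]
# ---------------------------------------------------------------------------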
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __magic_name__ ( __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=None ) -> Optional[Any]: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match''' __lowerCamelCase = nn.Parameter(__lowerCAmelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match''' __lowerCamelCase = nn.Parameter(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ) -> List[Any]: # set torch weights for 1-to-1 comparison __lowerCamelCase = np.asarray(weights[0] ) __lowerCamelCase = np.asarray(weights[1] ) __lowerCamelCase = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowerCAmelCase ).view(-1 , __lowerCAmelCase ).contiguous().transpose(0 , 1 ) , ) def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> List[Any]: # set torch weights for 1-to-1 comparison __lowerCamelCase = np.asarray(weights[0] ) __lowerCamelCase = np.asarray(weights[1] ) __lowerCamelCase = np.asarray(weights[2] ) __lowerCamelCase = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowerCAmelCase ).transpose(1 , 2 ).contiguous().view(-1 , __lowerCAmelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowerCAmelCase ).view(-1 , __lowerCAmelCase ).contiguous().transpose(0 , 1 ) , ) def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple ) -> Tuple: # layernorm 1 __lowerCamelCase = weights[0][0][0] __lowerCamelCase = np.asarray(layer_norm_a[0] ) __lowerCamelCase = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # lsh weights + output __lowerCamelCase = weights[0][1] if len(__lowerCAmelCase ) < 4: set_layer_weights_in_torch_lsh(__lowerCAmelCase , torch_block.attention , __lowerCAmelCase ) else: set_layer_weights_in_torch_local(__lowerCAmelCase , torch_block.attention , __lowerCAmelCase ) # intermediate weighs __lowerCamelCase = weights[2][0][1][2] # Chunked Feed Forward if len(__lowerCAmelCase ) == 4: __lowerCamelCase = intermediate_weights[2] # layernorm 2 __lowerCamelCase = np.asarray(intermediate_weights[0][0] ) __lowerCamelCase = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # intermediate dense __lowerCamelCase = np.asarray(intermediate_weights[1][0] ) __lowerCamelCase = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense 
, torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) # intermediate out __lowerCamelCase = np.asarray(intermediate_weights[4][0] ) __lowerCamelCase = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int ) -> Tuple: # reformer model __lowerCamelCase = torch_model.reformer # word embeds __lowerCamelCase = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowerCAmelCase ) , ) if isinstance(weights[3] , __lowerCAmelCase ): __lowerCamelCase = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __lowerCamelCase = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'''{position_embeddings[emb_idx]} emb does not match''' __lowerCamelCase = nn.Parameter(torch.tensor(__lowerCAmelCase ) ) __lowerCamelCase = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( __lowerCAmelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __lowerCamelCase = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # output layer norm __lowerCamelCase = np.asarray(weights[7][0] ) __lowerCamelCase = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(__lowerCAmelCase ) , torch.tensor(__lowerCAmelCase ) , ) # output embeddings __lowerCamelCase = np.asarray(weights[9][0] ) __lowerCamelCase = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(__lowerCAmelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowerCAmelCase ) , ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ) -> str: # Initialise PyTorch model __lowerCamelCase = ReformerConfig.from_json_file(__lowerCAmelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) __lowerCamelCase = ReformerModelWithLMHead(__lowerCAmelCase ) with open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = pickle.load(__lowerCAmelCase )['''weights'''] set_model_weights_in_torch(__lowerCAmelCase , __lowerCAmelCase , config.hidden_size ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
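# ---------------------------------------------------------------------------
# Why the conversion above keeps calling .transpose(0, 1): trax stores dense
# kernels as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features). A toy round-trip with made-up shapes, showing
# that transposing the kernel preserves the linear map:
import numpy as np
import torch
from torch import nn

trax_kernel = np.random.randn(128, 64).astype(np.float32)  # (in, out), illustrative
layer = nn.Linear(128, 64)

weight = torch.tensor(trax_kernel).transpose(0, 1).contiguous()  # -> (64, 128)
assert layer.weight.shape == weight.shape
layer.weight = nn.Parameter(weight)

x = torch.randn(2, 128)
expected = x @ torch.tensor(trax_kernel)  # the map trax would compute
assert torch.allclose(layer(x) - layer.bias, expected, atol=1e-5)
# ---------------------------------------------------------------------------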
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated SCREAMING_SNAKE_CASE__ : Tuple = collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ SCREAMING_SNAKE_CASE__ : List[str] = "https://storage.googleapis.com/cvdf-datasets/mnist/" def __magic_name__ ( __lowerCAmelCase : Any ) -> int: __lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowerCAmelCase )[0] @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(rows * cols * num_images ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) __lowerCamelCase = data.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , 1 ) return data @deprecated(__lowerCAmelCase , '''Please use tf.one_hot on tensors.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ) -> Dict: __lowerCamelCase = labels_dense.shape[0] __lowerCamelCase = numpy.arange(__lowerCAmelCase ) * num_classes __lowerCamelCase = numpy.zeros((num_labels, num_classes) ) __lowerCamelCase = 1 return labels_one_hot @deprecated(__lowerCAmelCase , '''Please use tf.data to implement this functionality.''' ) def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str=False , __lowerCAmelCase : List[str]=10 ) -> List[str]: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowerCAmelCase ) as bytestream: __lowerCamelCase = _readaa(__lowerCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) __lowerCamelCase = _readaa(__lowerCAmelCase ) __lowerCamelCase = bytestream.read(__lowerCAmelCase ) __lowerCamelCase = numpy.frombuffer(__lowerCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowerCAmelCase , __lowerCAmelCase ) return labels class lowerCAmelCase__ : @deprecated( SCREAMING_SNAKE_CASE__ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=dtypes.floataa , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : str=None , ) -> Optional[int]: __lowerCamelCase , __lowerCamelCase = random_seed.get_seed(SCREAMING_SNAKE_CASE__ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) __lowerCamelCase = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: __lowerCamelCase = 1_00_00 __lowerCamelCase = one_hot else: assert ( images.shape[0] == 
labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' __lowerCamelCase = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 __lowerCamelCase = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. __lowerCamelCase = images.astype(numpy.floataa ) __lowerCamelCase = numpy.multiply(SCREAMING_SNAKE_CASE__ , 1.0 / 255.0 ) __lowerCamelCase = images __lowerCamelCase = labels __lowerCamelCase = 0 __lowerCamelCase = 0 @property def __A ( self : str ) -> Optional[int]: return self._images @property def __A ( self : Any ) -> Dict: return self._labels @property def __A ( self : List[Any] ) -> int: return self._num_examples @property def __A ( self : str ) -> Any: return self._epochs_completed def __A ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : str=True ) -> str: if fake_data: __lowerCamelCase = [1] * 7_84 __lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE__ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE__ )], ) __lowerCamelCase = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perma] __lowerCamelCase = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch __lowerCamelCase = self._num_examples - start __lowerCamelCase = self._images[start : self._num_examples] __lowerCamelCase = self._labels[start : self._num_examples] # Shuffle the data if shuffle: __lowerCamelCase = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.images[perm] __lowerCamelCase = self.labels[perm] # Start next epoch __lowerCamelCase = 0 __lowerCamelCase = batch_size - rest_num_examples __lowerCamelCase = self._index_in_epoch __lowerCamelCase = self._images[start:end] __lowerCamelCase = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size __lowerCamelCase = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowerCAmelCase , '''Please write your own downloading logic.''' ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ) -> List[Any]: if not gfile.Exists(__lowerCAmelCase ): gfile.MakeDirs(__lowerCAmelCase ) __lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) if not gfile.Exists(__lowerCAmelCase ): urllib.request.urlretrieve(__lowerCAmelCase , __lowerCAmelCase ) # noqa: S310 with gfile.GFile(__lowerCAmelCase ) as f: __lowerCamelCase = f.size() print('''Successfully downloaded''' , __lowerCAmelCase , __lowerCAmelCase , '''bytes.''' ) return filepath @deprecated( __lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , 
__lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase ) __lowerCamelCase = fake() __lowerCamelCase = fake() __lowerCamelCase = fake() return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase ) if not source_url: # empty string check __lowerCamelCase = DEFAULT_SOURCE_URL __lowerCamelCase = '''train-images-idx3-ubyte.gz''' __lowerCamelCase = '''train-labels-idx1-ubyte.gz''' __lowerCamelCase = '''t10k-images-idx3-ubyte.gz''' __lowerCamelCase = '''t10k-labels-idx1-ubyte.gz''' __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_images(__lowerCAmelCase ) __lowerCamelCase = _maybe_download( __lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file ) with gfile.Open(__lowerCAmelCase , '''rb''' ) as f: __lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase ) if not 0 <= validation_size <= len(__lowerCAmelCase ): __lowerCamelCase = ( '''Validation size should be between 0 and ''' f'''{len(__lowerCAmelCase )}. Received: {validation_size}.''' ) raise ValueError(__lowerCAmelCase ) __lowerCamelCase = train_images[:validation_size] __lowerCamelCase = train_labels[:validation_size] __lowerCamelCase = train_images[validation_size:] __lowerCamelCase = train_labels[validation_size:] __lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) __lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
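# ---------------------------------------------------------------------------
# Usage sketch for the loader above. In the original TensorFlow tutorial code
# the final function is exposed as read_data_sets(); the placeholder names in
# this file all collide, so the sketch below uses the upstream name as an
# assumption about how the module is meant to be called.
data = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)

images, labels = data.train.next_batch(32)  # shuffled mini-batch
print(images.shape)  # (32, 784): 28x28 images flattened, rescaled to [0.0, 1.0]
print(labels.shape)  # (32, 10):  one-hot labels
print(data.validation.num_examples, data.test.num_examples)  # 5000 10000
# ---------------------------------------------------------------------------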
import argparse import os import re SCREAMING_SNAKE_CASE__ : List[str] = "src/diffusers" # Pattern that looks at the indentation in a line. SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : int = re.compile(r"^\s*\"([^\"]+)\":") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]") # Pattern that matches `"key",` and puts `key` in group 0. SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(r"^\s*\"([^\"]+)\",\s*$") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"\[([^\]]+)\]") def __magic_name__ ( __lowerCAmelCase : int ) -> str: __lowerCamelCase = _re_indent.search(__lowerCAmelCase ) return "" if search is None else search.groups()[0] def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str="" , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Union[str, Any]=None ) -> List[Any]: __lowerCamelCase = 0 __lowerCamelCase = code.split('''\n''' ) if start_prompt is not None: while not lines[index].startswith(__lowerCAmelCase ): index += 1 __lowerCamelCase = ['''\n'''.join(lines[:index] )] else: __lowerCamelCase = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). __lowerCamelCase = [lines[index]] index += 1 while index < len(__lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(__lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ): current_block.append(lines[index] ) blocks.append('''\n'''.join(__lowerCAmelCase ) ) if index < len(__lowerCAmelCase ) - 1: __lowerCamelCase = [lines[index + 1]] index += 1 else: __lowerCamelCase = [] else: blocks.append('''\n'''.join(__lowerCAmelCase ) ) __lowerCamelCase = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__lowerCAmelCase ) > 0: blocks.append('''\n'''.join(__lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__lowerCAmelCase ): blocks.append('''\n'''.join(lines[index:] ) ) return blocks def __magic_name__ ( __lowerCAmelCase : Optional[int] ) -> Any: def _inner(__lowerCAmelCase : List[str] ): return key(__lowerCAmelCase ).lower().replace('''_''' , '''''' ) return _inner def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=None ) -> Optional[Any]: # If no key is provided, we use a noop. def noop(__lowerCAmelCase : Union[str, Any] ): return x if key is None: __lowerCamelCase = noop # Constants are all uppercase, they go first. __lowerCamelCase = [obj for obj in objects if key(__lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. __lowerCamelCase = [obj for obj in objects if key(__lowerCAmelCase )[0].isupper() and not key(__lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. 
__lowerCamelCase = [obj for obj in objects if not key(__lowerCAmelCase )[0].isupper()] __lowerCamelCase = ignore_underscore(__lowerCAmelCase ) return sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) + sorted(__lowerCAmelCase , key=__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Optional[Any]: # This inner function sort imports between [ ]. def _replace(__lowerCAmelCase : int ): __lowerCamelCase = match.groups()[0] if "," not in imports: return f'''[{imports}]''' __lowerCamelCase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowerCamelCase = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] ) + "]" __lowerCamelCase = import_statement.split('''\n''' ) if len(__lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. __lowerCamelCase = 2 if lines[1].strip() == '''[''' else 1 __lowerCamelCase = [(i, _re_strip_line.search(__lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] __lowerCamelCase = sort_objects(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] ) __lowerCamelCase = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: __lowerCamelCase = _re_bracket_content.sub(_replace , lines[1] ) else: __lowerCamelCase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: __lowerCamelCase = keys[:-1] __lowerCamelCase = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(__lowerCAmelCase )] ) return "\n".join(__lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line __lowerCamelCase = _re_bracket_content.sub(_replace , __lowerCAmelCase ) return import_statement def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int]=True ) -> Any: with open(__lowerCAmelCase , '''r''' ) as f: __lowerCamelCase = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 __lowerCamelCase = split_code_in_indented_blocks( __lowerCAmelCase , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. __lowerCamelCase = main_blocks[block_idx] __lowerCamelCase = block.split('''\n''' ) # Get to the start of the imports. __lowerCamelCase = 0 while line_idx < len(__lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: __lowerCamelCase = len(__lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(__lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. 
__lowerCamelCase = '''\n'''.join(block_lines[line_idx:-1] ) __lowerCamelCase = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. __lowerCamelCase = split_code_in_indented_blocks(__lowerCAmelCase , indent_level=__lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend __lowerCamelCase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. __lowerCamelCase = [(pattern.search(__lowerCAmelCase ).groups()[0] if pattern.search(__lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. __lowerCamelCase = [(i, key) for i, key in enumerate(__lowerCAmelCase ) if key is not None] __lowerCamelCase = [x[0] for x in sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. __lowerCamelCase = 0 __lowerCamelCase = [] for i in range(len(__lowerCAmelCase ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: __lowerCamelCase = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(__lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. __lowerCamelCase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(__lowerCAmelCase ): if check_only: return True else: print(f'''Overwriting {file}.''' ) with open(__lowerCAmelCase , '''w''' ) as f: f.write('''\n'''.join(__lowerCAmelCase ) ) def __magic_name__ ( __lowerCAmelCase : List[str]=True ) -> Union[str, Any]: __lowerCamelCase = [] for root, _, files in os.walk(__lowerCAmelCase ): if "__init__.py" in files: __lowerCamelCase = sort_imports(os.path.join(__lowerCAmelCase , '''__init__.py''' ) , check_only=__lowerCAmelCase ) if result: __lowerCamelCase = [os.path.join(__lowerCAmelCase , '''__init__.py''' )] if len(__lowerCAmelCase ) > 0: raise ValueError(f'''Would overwrite {len(__lowerCAmelCase )} files, run `make style`.''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
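# A minimal, self-contained sketch (hypothetical object names) of the ordering
# convention the script above enforces per indented block of an __init__.py:
# uppercase constants first, then CamelCase classes, then lowercase functions,
# comparing names case-insensitively with underscores stripped.
if __name__ == "__main__":
    demo_objects = ["load_tool", "OneFormerModel", "FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "logging"]

    def demo_key(name: str) -> str:
        return name.lower().replace("_", "")

    constants = sorted((o for o in demo_objects if o.isupper()), key=demo_key)
    classes = sorted((o for o in demo_objects if o[0].isupper() and not o.isupper()), key=demo_key)
    functions = sorted((o for o in demo_objects if not o[0].isupper()), key=demo_key)
    print(constants + classes + functions)
    # ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OneFormerModel', 'load_tool', 'logging']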
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } SCREAMING_SNAKE_CASE__ : List[Any] = { "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } SCREAMING_SNAKE_CASE__ : Dict = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[int] = VOCAB_FILES_NAMES a__ : Any = PRETRAINED_VOCAB_FILES_MAP a__ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Optional[Any] = SqueezeBertTokenizer def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[UNK]" , SCREAMING_SNAKE_CASE__ : Optional[int]="[SEP]" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="[PAD]" , SCREAMING_SNAKE_CASE__ : Tuple="[CLS]" , SCREAMING_SNAKE_CASE__ : str="[MASK]" , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : Dict , ) -> Optional[Any]: super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop('''type''' ) ) __lowerCamelCase = do_lower_case __lowerCamelCase = strip_accents __lowerCamelCase = tokenize_chinese_chars __lowerCamelCase = normalizer_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = do_lower_case def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int , 
SCREAMING_SNAKE_CASE__ : List[str]=None ) -> str: __lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]: __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: __lowerCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
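# Usage sketch for the fast tokenizer above (assumes the `transformers` package
# is installed and the "squeezebert/squeezebert-uncased" checkpoint from the
# pretrained map is reachable on the Hub):
if __name__ == "__main__":
    from transformers import SqueezeBertTokenizerFast

    tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
    enc = tok("Hello world", "Second segment")
    print(enc["input_ids"])       # [CLS] segment A [SEP] segment B [SEP]
    print(enc["token_type_ids"])  # 0s over the first segment and its [SEP], 1s after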
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Dict = SpeechTaTokenizer a__ : Any = False a__ : Optional[Any] = True def __A ( self : List[str] ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing __lowerCamelCase = SpeechTaTokenizer(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AddedToken('''<mask>''' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: __lowerCamelCase = '''this is a test''' __lowerCamelCase = '''this is a test''' return input_text, output_text def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Dict=20 , SCREAMING_SNAKE_CASE__ : List[str]=5 ) -> Any: __lowerCamelCase , __lowerCamelCase = self.get_input_output_texts(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ ) return text, ids def __A ( self : str ) -> List[Any]: __lowerCamelCase = '''<pad>''' __lowerCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> str: __lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 81 ) def __A ( self : Any ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def __A ( self : int ) -> int: __lowerCamelCase = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) __lowerCamelCase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] __lowerCamelCase = tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size + 
len(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) __lowerCamelCase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} __lowerCamelCase = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.vocab_size __lowerCamelCase = len(SCREAMING_SNAKE_CASE__ ) self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size_a + len(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def __A ( self : Optional[Any] ) -> Tuple: pass def __A ( self : Optional[Any] ) -> Optional[int]: pass def __A ( self : int ) -> str: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) __lowerCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) __lowerCamelCase = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) # fmt: off self.assertListEqual(SCREAMING_SNAKE_CASE__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on __lowerCamelCase = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def __A ( self : Tuple ) -> str: # Use custom sequence because this tokenizer does not handle numbers. 
__lowerCamelCase = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off __lowerCamelCase = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=SCREAMING_SNAKE_CASE__ , )
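# Typical local invocation for this suite (the path follows the usual layout of
# a transformers checkout and may differ):
#   pytest tests/models/speecht5/test_tokenization_speecht5.py -rA
# The @slow integration test above only runs when RUN_SLOW=1 is set in the
# environment.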
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    """
    Return True if every element of the list is distinct.

    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 2, 4])
    False
    """
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# flake8: noqa
# Lint as: python3

__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
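# Usage sketch of the lazy module above: nothing from `modeling_falcon` is
# imported until an attribute is first accessed (assumes torch is installed and
# the package is importable as `transformers`):
if __name__ == "__main__":
    from transformers import FalconConfig  # resolved through the _LazyModule

    config = FalconConfig()  # default-sized config; no weights are downloaded
    print(type(config).__name__, config.num_hidden_layers)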
from __future__ import annotations


def mean(nums: list) -> float:
    """
    Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([])
    Traceback (most recent call last):
        ...
    ValueError: List is empty
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Recursive Euclidean algorithm.

    >>> greatest_common_divisor(24, 40)
    8
    >>> greatest_common_divisor(0, 7)
    7
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """
    Iterative Euclidean algorithm.

    >>> gcd_by_iterative(24, 40)
    8
    """
    while y:  # when y becomes 0 the loop terminates and x holds the GCD
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Ask for two comma-separated integers and print both GCD variants."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
def max_product_subarray(numbers: list[int]) -> int:
    """
    Return the maximum product over all contiguous subarrays.

    >>> max_product_subarray([2, 3, -2, 4])
    6
    >>> max_product_subarray([-2, 0, -1])
    0
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products ending at position i;
        # a negative number swaps the roles of the running max and min
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCAmelCase__ ( unittest.TestCase ): @slow def __A ( self : Optional[int] ) -> Union[str, Any]: __lowerCamelCase = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' ) __lowerCamelCase = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids __lowerCamelCase = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids __lowerCamelCase = shift_tokens_right(SCREAMING_SNAKE_CASE__ , model.config.pad_token_id , model.config.decoder_start_token_id ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ).logits __lowerCamelCase = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE__ , onehot(SCREAMING_SNAKE_CASE__ , logits.shape[-1] ) ).mean() __lowerCamelCase = -(labels.shape[-1] * loss.item()) __lowerCamelCase = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
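# A toy NumPy illustration, independent of the checkpoint above, of what
# `shift_tokens_right` produces for teacher forcing: the decoder inputs are the
# labels shifted one position to the right, with the decoder start token
# prepended (the real helper additionally replaces any -100 padding markers
# with the pad token id).
import numpy as np

labels = np.array([[5, 6, 7, 1]])  # 1 = eos
decoder_start_token_id = 0
shifted = np.zeros_like(labels)
shifted[:, 1:] = labels[:, :-1]
shifted[:, 0] = decoder_start_token_id
print(shifted)  # [[0 5 6 7]]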
import cmath
import math


def apparent_power(
    voltage: float, current: float, voltage_angle: float, current_angle: float
) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit.

    >>> apparent_power(100, 5, 0, 0)
    (500+0j)
    """
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = "bart" SCREAMING_SNAKE_CASE__ : Dict = True @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> str: if LOAD_DENSE_INDEX: __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' ) __lowerCamelCase = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' ) __lowerCamelCase = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' ) __lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' ) __lowerCamelCase = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' ) sas_model.load_state_dict(save_dict['''model'''] ) __lowerCamelCase = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase = make_qa_sas_model( model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> Optional[int]: if LOAD_DENSE_INDEX: __lowerCamelCase = faiss.StandardGpuResources() __lowerCamelCase = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['''train'''] __lowerCamelCase = np.memmap( '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase = faiss.IndexFlatIP(128 ) __lowerCamelCase = faiss.index_cpu_to_gpu(__lowerCAmelCase , 1 , __lowerCAmelCase ) wikiaab_gpu_index_flat.add(__lowerCAmelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase = (None, None) __lowerCamelCase = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__lowerCAmelCase ) def __magic_name__ ( ) -> List[str]: __lowerCamelCase = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' ) __lowerCamelCase = elia['''train_eli5'''] __lowerCamelCase = np.memmap( '''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) ) __lowerCamelCase = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__lowerCAmelCase ) return (elia_train, eli5_train_q_index) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_indexes() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = load_models() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = load_train_data() def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str]=10 ) -> List[str]: __lowerCamelCase = embed_questions_for_retrieval([question] , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase , __lowerCamelCase = eli5_train_q_index.search(__lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = [elia_train[int(__lowerCAmelCase )] for i in I[0]] return nn_examples def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict="wiki40b" , __lowerCAmelCase : Any="dense" , __lowerCAmelCase : 
Dict=10 ) -> Union[str, Any]: if source == "none": __lowerCamelCase , __lowerCamelCase = (''' <P> '''.join(['''''' for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase = query_qa_dense_index( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: __lowerCamelCase , __lowerCamelCase = query_es_index( __lowerCAmelCase , __lowerCAmelCase , index_name='''english_wiki40b_snippets_100w''' , n_results=__lowerCAmelCase , ) __lowerCamelCase = [ (res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst ] __lowerCamelCase = '''question: {} context: {}'''.format(__lowerCAmelCase , __lowerCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __lowerCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __lowerCAmelCase : None), } ) def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str=64 , __lowerCAmelCase : Dict=256 , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[Any]=0.95 , __lowerCAmelCase : List[Any]=0.8 ) -> Any: with torch.no_grad(): __lowerCamelCase = qa_sas_generate( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , num_answers=1 , num_beams=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase , do_sample=__lowerCAmelCase , temp=__lowerCAmelCase , top_p=__lowerCAmelCase , top_k=__lowerCAmelCase , max_input_length=1024 , device='''cuda:0''' , )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar SCREAMING_SNAKE_CASE__ : List[str] = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" SCREAMING_SNAKE_CASE__ : Dict = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia SCREAMING_SNAKE_CASE__ : int = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n" st.sidebar.markdown(description, unsafe_allow_html=True) SCREAMING_SNAKE_CASE__ : str = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.checkbox("Demo options") if demo_options: SCREAMING_SNAKE_CASE__ : Optional[int] = st.sidebar.selectbox( "", action_list, index=3, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = action_list.index(action_st) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = show_type == "Show full text of passages" else: SCREAMING_SNAKE_CASE__ : Any = 3 SCREAMING_SNAKE_CASE__ : Any = True SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.checkbox("Retrieval 
options") if retrieval_options: SCREAMING_SNAKE_CASE__ : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n " st.sidebar.markdown(retriever_info) SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) SCREAMING_SNAKE_CASE__ : int = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: SCREAMING_SNAKE_CASE__ : List[str] = "wiki40b" SCREAMING_SNAKE_CASE__ : Optional[Any] = "dense" SCREAMING_SNAKE_CASE__ : str = "beam" SCREAMING_SNAKE_CASE__ : List[Any] = 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = 64 SCREAMING_SNAKE_CASE__ : List[Any] = 256 SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.checkbox("Generation options") if generate_options: SCREAMING_SNAKE_CASE__ : Dict = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n " st.sidebar.markdown(generate_info) SCREAMING_SNAKE_CASE__ : List[str] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) SCREAMING_SNAKE_CASE__ : str = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: SCREAMING_SNAKE_CASE__ : Any = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Dict = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = None # start main text SCREAMING_SNAKE_CASE__ : Any = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? 
Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] SCREAMING_SNAKE_CASE__ : List[str] = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": SCREAMING_SNAKE_CASE__ : Union[str, Any] = st.text_input("Enter your question here:", "") else: SCREAMING_SNAKE_CASE__ : str = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = make_support(question, source=wiki_source, method="dense", n_results=10) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = make_support(question, source=wiki_source, method="sparse", n_results=10) SCREAMING_SNAKE_CASE__ : int = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] SCREAMING_SNAKE_CASE__ : Optional[Any] = support_list[:10] SCREAMING_SNAKE_CASE__ : Tuple = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): SCREAMING_SNAKE_CASE__ : Optional[int] = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) SCREAMING_SNAKE_CASE__ : Tuple = res[1].strip() if sec_titles == "": SCREAMING_SNAKE_CASE__ : Union[str, Any] = "[{}]({})".format(res[0], wiki_url) else: SCREAMING_SNAKE_CASE__ : Dict = sec_titles.split(" & ") SCREAMING_SNAKE_CASE__ : int = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( "> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: SCREAMING_SNAKE_CASE__ : Any = find_nearest_training(question) SCREAMING_SNAKE_CASE__ : List[Any] = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ "{}. 
{}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) SCREAMING_SNAKE_CASE__ : List[Any] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n" st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. SCREAMING_SNAKE_CASE__ : List[str] = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def __magic_name__ ( __lowerCAmelCase : Any ) -> Optional[int]: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : str ) -> Any: from transformers.testing_utils import pytest_terminal_summary_main __lowerCamelCase = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ) -> str: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: __lowerCamelCase = 0 # Doctest custom flag to ignore output. SCREAMING_SNAKE_CASE__ : Union[str, Any] = doctest.register_optionflag("IGNORE_RESULT") SCREAMING_SNAKE_CASE__ : Union[str, Any] = doctest.OutputChecker class lowerCAmelCase__ ( __lowercase ): def __A ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[Any] = CustomOutputChecker SCREAMING_SNAKE_CASE__ : Union[str, Any] = HfDoctestModule SCREAMING_SNAKE_CASE__ : int = HfDocTestParser
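# Usage sketch for the custom doctest flag registered above: tagging an
# expected-output line with IGNORE_RESULT makes CustomOutputChecker accept any
# result for that statement, e.g.
#
#   >>> import random
#   >>> random.random()  # doctest: +IGNORE_RESULT
#   0.123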
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : str = { "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json", "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json", "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json", "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json", "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json", "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json", "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json", "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json", "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json", } class lowerCAmelCase__ ( __lowercase ): a__ : Dict = """xmod""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_05_22 , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : List[str]=30_72 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=5_12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : List[str]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Any="absolute" , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=("en_XX",) , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , **SCREAMING_SNAKE_CASE__ : int , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout __lowerCamelCase = pre_norm __lowerCamelCase = adapter_reduction_factor __lowerCamelCase = adapter_layer_norm __lowerCamelCase = adapter_reuse_layer_norm __lowerCamelCase = ln_before_adapter __lowerCamelCase = list(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = default_language class lowerCAmelCase__ ( __lowercase ): @property def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == 
"multiple-choice": __lowerCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
def mean_absolute_deviation(nums: list[int]) -> float:
    """
    Return the mean absolute deviation of a non-empty list of numbers.

    >>> mean_absolute_deviation([2, 4, 6, 8])
    2.0
    """
    if not nums:  # make sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
from ...configuration_utils import PretrainedConfig SCREAMING_SNAKE_CASE__ : Optional[Any] = { "google/tapas-base-finetuned-sqa": ( "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json" ), "google/tapas-base-finetuned-wtq": ( "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json" ), "google/tapas-base-finetuned-wikisql-supervised": ( "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json" ), "google/tapas-base-finetuned-tabfact": ( "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json" ), } class lowerCAmelCase__ ( __lowercase ): a__ : Optional[Any] = """tapas""" def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict=3_05_22 , SCREAMING_SNAKE_CASE__ : int=7_68 , SCREAMING_SNAKE_CASE__ : str=12 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=10_24 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1e-12 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Any=10.0 , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1.0 , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=1.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=1.0 , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : int="ratio" , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Dict=64 , SCREAMING_SNAKE_CASE__ : Dict=32 , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_sizes __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps # Fine-tuning task hyperparameters __lowerCamelCase = positive_label_weight __lowerCamelCase = num_aggregation_labels __lowerCamelCase = aggregation_loss_weight __lowerCamelCase = use_answer_as_supervision __lowerCamelCase = answer_loss_importance __lowerCamelCase = use_normalized_answer_loss __lowerCamelCase = huber_loss_delta __lowerCamelCase = temperature __lowerCamelCase = aggregation_temperature __lowerCamelCase = use_gumbel_for_cells __lowerCamelCase = use_gumbel_for_aggregation __lowerCamelCase = average_approximation_function __lowerCamelCase = cell_selection_preference __lowerCamelCase = answer_loss_cutoff __lowerCamelCase = max_num_rows 
__lowerCamelCase = max_num_columns __lowerCamelCase = average_logits_per_cell __lowerCamelCase = select_one_column __lowerCamelCase = allow_empty_column_selection __lowerCamelCase = init_cell_selection_weights_to_zero __lowerCamelCase = reset_position_index_per_cell __lowerCamelCase = disable_per_token_loss # Aggregation hyperparameters __lowerCamelCase = aggregation_labels __lowerCamelCase = no_aggregation_label_index if isinstance(self.aggregation_labels , SCREAMING_SNAKE_CASE__ ): __lowerCamelCase = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in aggregation_labels.items()}
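# Minimal instantiation sketch (using the upstream name `TapasConfig` for the
# class above; these two overrides mirror the documented WTQ fine-tuning setup):
if __name__ == "__main__":
    from transformers import TapasConfig

    config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
    print(config.num_aggregation_labels, config.cell_selection_preference)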
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for one split."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
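# --- Usage sketch (added): driving the parser from a JSON file ---------------
# The `.json` branch in main() relies on HfArgumentParser.parse_json_file,
# which returns one instance per dataclass. The toy dataclass and file name
# below are illustrative, not part of this script.
import json
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ToyArgs:
    data_dir: str = field(default="data")
    n_train: int = field(default=-1)


with open("args.json", "w") as f:
    json.dump({"data_dir": "xsum", "n_train": 1000}, f)

(toy_args,) = HfArgumentParser(ToyArgs).parse_json_file(json_file="args.json")
print(toy_args)  # ToyArgs(data_dir='xsum', n_train=1000)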
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):  # placeholder name; the original class name was stripped by the dataset
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
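# --- Usage sketch (added) -----------------------------------------------------
# How a processor like the one above is typically driven. The checkpoint names
# and the blank PIL image are stand-ins for illustration, not from the original
# file; any tokenizer/image-processor pair would do.
from PIL import Image
from transformers import AutoImageProcessor, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
processor = ImageTextProcessor(image_processor=image_processor, tokenizer=tokenizer)

batch = processor(text=["a photo of a cat"], images=[Image.new("RGB", (224, 224))], return_tensors="pt")
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']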
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class ScoreSdeVePipelineFastTests(unittest.TestCase):  # class/method names reconstructed; originals stripped by the dataset
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str]=13 , SCREAMING_SNAKE_CASE__ : Optional[int]=3 , SCREAMING_SNAKE_CASE__ : int=2_24 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] , ) -> List[Any]: __lowerCamelCase = size if size is not None else {'''height''': 18, '''width''': 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std def __A ( self : int ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Tuple = ViTImageProcessor if is_vision_available() else None def __A ( self : str ) -> Optional[int]: __lowerCamelCase = EfficientFormerImageProcessorTester(self ) @property def __A ( self : List[str] ) -> List[Any]: return self.image_proc_tester.prepare_image_processor_dict() def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) def __A ( self : str ) -> List[Any]: pass def __A ( self : Dict ) -> Tuple: # Initialize image_processor __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def __A ( self : Dict ) -> Dict: # Initialize image_processor __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create 
random numpy tensors __lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def __A ( self : int ) -> Optional[Any]: # Initialize image_processor __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the distinct prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of distinct prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all elements of a list are equal (an empty list counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each (Project Euler 47 for n=4)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
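# --- Sanity checks (added) ----------------------------------------------------
# The n=2 and n=3 values come straight from the Project Euler 47 problem
# statement; 134043 is the widely published answer for n=4, stated here as a
# known result rather than derived in this file. The n=4 check takes a moment.
assert solution(2) == 14      # 14 = 2*7 and 15 = 3*5
assert solution(3) == 644     # 644, 645, 646 each have three distinct prime factors
assert solution(4) == 134043  # first of four consecutive integers with four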
# This is the module that test_patching.py uses to test patch_submodule()

import os  # noqa: this is just for tests

import os as renamed_os  # noqa: this is just for tests
from os import path  # noqa: this is just for tests
from os import path as renamed_path  # noqa: this is just for tests
from os.path import join  # noqa: this is just for tests
from os.path import join as renamed_join  # noqa: this is just for tests


open = open  # noqa: we just need to have a builtin inside this module to test it properly
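# --- Illustration (added): what "patching a submodule" means here -------------
# A sketch using only the standard library's unittest.mock, not the project's
# own patch_submodule() helper, and assuming the module above is importable as
# `_test_patching`; treat both as illustrative assumptions.
from unittest.mock import patch

import _test_patching

with patch("_test_patching.open") as mock_open:
    _test_patching.open("some_file")  # resolves to the mock, not the real builtin
    mock_open.assert_called_once_with("some_file")

# The different import spellings above all point at the same underlying object:
assert _test_patching.os.path.join is _test_patching.renamed_join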
import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCAmelCase__ : def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=99 , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : int=9 , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : int=32 , SCREAMING_SNAKE_CASE__ : Tuple=5 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : str=37 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.002 , SCREAMING_SNAKE_CASE__ : str=1 , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0 , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None , ) -> Optional[Any]: __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = encoder_seq_length __lowerCamelCase = decoder_seq_length # For common tests __lowerCamelCase = self.decoder_seq_length __lowerCamelCase = is_training __lowerCamelCase = use_attention_mask __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = d_ff __lowerCamelCase = relative_attention_num_buckets __lowerCamelCase = dropout_rate __lowerCamelCase = initializer_factor __lowerCamelCase = eos_token_id __lowerCamelCase = pad_token_id __lowerCamelCase = decoder_start_token_id __lowerCamelCase = None __lowerCamelCase = decoder_layers def __A ( self : Any ) -> Tuple: return TaConfig.from_pretrained('''google/umt5-base''' ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , ) -> Optional[int]: if attention_mask is None: __lowerCamelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __lowerCamelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __lowerCamelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: __lowerCamelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def __A ( self : List[Any] ) -> Tuple: __lowerCamelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , 
self.vocab_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __lowerCamelCase = input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __lowerCamelCase = self.get_config() __lowerCamelCase = config.num_attention_heads __lowerCamelCase = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, input_dict def __A ( self : Tuple ) -> List[str]: __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() return config, inputs_dict def __A ( self : Optional[Any] ) -> Any: return TaConfig( vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : List[Any] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ) -> int: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() __lowerCamelCase = model( input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) __lowerCamelCase = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = result.last_hidden_state __lowerCamelCase = result.past_key_values __lowerCamelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def __A ( self : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval() # first forward pass __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) ) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 ) __lowerCamelCase , __lowerCamelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] __lowerCamelCase = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] # select random slice __lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach() __lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , ) -> Optional[int]: __lowerCamelCase = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval() __lowerCamelCase = model(**SCREAMING_SNAKE_CASE__ )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() ) @require_torch class lowerCAmelCase__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): a__ : List[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ : Tuple = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ : int = True a__ : int = False a__ : Tuple = False a__ : Optional[int] = True a__ : Optional[int] = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ : Tuple = [0.8, 0.9] def __A ( self : Tuple ) -> Tuple: __lowerCamelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def __A ( self : List[str] ) -> Union[str, Any]: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=SCREAMING_SNAKE_CASE__ , 
opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def __A ( self : Union[str, Any] ) -> Any: __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ ) def __A ( self : Any ) -> Any: __lowerCamelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = config_and_inputs[0] __lowerCamelCase = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() model.to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ), } for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ): __lowerCamelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __lowerCamelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step __lowerCamelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def __A ( self : Tuple ) -> Optional[Any]: pass @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def __A ( self : int ) -> Optional[Any]: __lowerCamelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , padding=SCREAMING_SNAKE_CASE__ ).input_ids # fmt: off __lowerCamelCase = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        # no path found; fall back to the start position
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_x,  # goal passed as (x, y) to match Node's signature
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each frontier aims at the other frontier's current node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        # no meeting point found; fall back to the forward start position
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidirectional_astar = BidirectionalAStar(init, goal)
    bd_path = bidirectional_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
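# --- Note (added): choosing the heuristic -------------------------------------
# HEURISTIC toggles Node.calculate_heuristic() between Manhattan (1) and
# Euclidean (0) distance. With 4-way movement on this grid, Manhattan distance
# equals the true minimum number of moves on an empty grid, so it is the
# tighter admissible estimate; Euclidean is admissible too, but looser:
dx, dy = 6, 6                        # offsets from (0, 0) to this file's goal
manhattan = abs(dx) + abs(dy)        # 12 -> exact cost with only 4-way moves
euclidean = (dx**2 + dy**2) ** 0.5   # ~8.49 -> underestimates, so A* expands more nodes
print(manhattan, round(euclidean, 2))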
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg is kept for backward compatibility with existing configs
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
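# --- Usage sketch (added): the rope_scaling contract --------------------------
# A minimal demonstration of the validation implemented above; purely
# illustrative, it touches no model weights.
cfg = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
print(cfg.rope_scaling)

try:
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 1.0})    # factor must be > 1.0
except ValueError as err:
    print(err)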
import argparse
import os

import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        # target key reconstructed: codebook weights live under the image_codebook prefix
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and tombstone deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Set the bucket if it is empty (or a tombstone) or already holds the key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
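# --- Usage sketch (added) ------------------------------------------------------
# Because HashMap subclasses MutableMapping, dict-style helpers such as
# __contains__ and .get() come for free once __getitem__/__setitem__/
# __delitem__/__iter__/__len__ are defined above.
hm = HashMap(initial_block_size=4)
hm["a"] = 1
hm["b"] = 2
hm["a"] = 10              # same key: overwritten in place, no duplicate slot
print(hm["a"], len(hm))   # 10 2
del hm["b"]
print("b" in hm)          # False
print(hm)                 # HashMap(a: 10)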
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every combination of words from word_bank that concatenates to target."""
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds
                    new_combinations = [[word, *way] for way in table[i]]
                    # now push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __lowercase , __lowercase , unittest.TestCase ): a__ : str = IFPipeline a__ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""} a__ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS a__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""} def __A ( self : Optional[Any] ) -> Tuple: return self._get_dummy_components() def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict=0 ) -> Optional[Any]: if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __A ( self : Any ) -> Tuple: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __A ( self : Dict ) -> List[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __A ( self : Dict ) -> Any: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __A ( self : Any ) -> str: self._test_save_load_local() def __A ( self : Tuple ) -> Optional[int]: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self : List[str] ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : Dict ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : List[Any] ) -> Optional[int]: # if __lowerCamelCase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) __lowerCamelCase = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) __lowerCamelCase , __lowerCamelCase = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __lowerCamelCase = None __lowerCamelCase = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __lowerCamelCase = IFImgaImgPipeline(**pipe_a.components ) __lowerCamelCase = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __lowerCamelCase = IFInpaintingPipeline(**pipe_a.components ) __lowerCamelCase = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: # pipeline 1 _start_torch_memory_measurement() __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (64, 64, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ) 
__lowerCamelCase = output.images[0] assert image.shape == (64, 64, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: # pipeline 1 _start_torch_memory_measurement() __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (64, 64, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # pipeline 2 _start_torch_memory_measurement() __lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='''np''' , ) __lowerCamelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) __lowerCamelCase = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __lowerCamelCase = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _start_torch_memory_measurement() -> None: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
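# Hedged usage sketch (not part of the original test file): the helper above
# resets CUDA's peak-memory counters so each pipeline stage can then assert a
# VRAM budget via torch.cuda.max_memory_allocated(). Guarded so it is a no-op
# on machines without a GPU.
import torch

if torch.cuda.is_available():
    _start_torch_memory_measurement()
    _ = torch.ones(1024, 1024, device="cuda")  # ~4 MiB of float32
    peak = torch.cuda.max_memory_allocated()
    assert peak >= 4 * 1024 * 1024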
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
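# Quick sanity checks (a sketch assumed to run after the definition above; the
# name binary_and comes from the cleanup of the obfuscated original): the
# helper should always agree with Python's built-in `&` operator on
# non-negative ints, just formatted as a "0b"-prefixed, zero-padded string.
assert binary_and(37, 50) == "0b100000"        # 100101 & 110010 -> 100000
assert int(binary_and(37, 50), 2) == 37 & 50   # matches the built-in operator
assert binary_and(25, 32) == "0b000000"        # disjoint bits -> all zeros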
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
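# Hedged usage sketch (QuestionAnsweringExtractive and column_mapping are the
# deobfuscated names assumed in the cleanup above): the template is mostly a
# schema carrier, and column_mapping tells `datasets` how to rename a dataset's
# columns to the canonical question/context/answers triplet.
template = QuestionAnsweringExtractive(question_column="query", context_column="passage")
assert template.column_mapping == {"query": "question", "passage": "context", "answers": "answers"}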
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : List[str] ) -> Dict: __lowerCamelCase = tempfile.mkdtemp() # fmt: off __lowerCamelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCamelCase = {'''unk_token''': '''<unk>'''} __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __lowerCamelCase = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __A ( self : int , **SCREAMING_SNAKE_CASE__ : int ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[int] , **SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ ) def __A ( self : Dict ) -> Dict: shutil.rmtree(self.tmpdirname ) def __A ( self : str ) -> Any: __lowerCamelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : List[Any] ) -> List[str]: __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = self.get_rust_tokenizer() __lowerCamelCase = self.get_image_processor() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCamelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname ) 
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Union[str, Any] ) -> int: __lowerCamelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCamelCase = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) __lowerCamelCase = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ ) def __A ( self : Optional[Any] ) -> Union[str, Any]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self : List[Any] ) -> Optional[int]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer(SCREAMING_SNAKE_CASE__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''lower newer''' __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : Optional[Any] ) -> List[str]: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = 
CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = self.prepare_image_inputs() __lowerCamelCase = processor(images=SCREAMING_SNAKE_CASE__ , visual_prompt=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE__ ): processor() def __A ( self : List[Any] ) -> Any: __lowerCamelCase = self.get_image_processor() __lowerCamelCase = self.get_tokenizer() __lowerCamelCase = CLIPSegProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCamelCase = processor.batch_decode(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
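# Small sanity sketch for gen_gaussian_kernel above (name assumed from the
# cleanup): the kernel should peak at the center and be symmetric. Note that,
# as written, the kernel is not normalized to sum to 1, so gaussian_filter
# does not exactly preserve overall image brightness; the sketch mirrors the
# code above rather than recommending that choice.
import numpy as np

k = gen_gaussian_kernel(3, sigma=1)
assert k.shape == (3, 3)
assert k[1, 1] == k.max()   # center pixel carries the largest weight
assert np.allclose(k, k.T)  # symmetric in x and y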
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[mid], sequence[end] = sequence[end], sequence[mid]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
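# Quick sanity sketch for the in-place slowsort above: the algorithm is the
# classic "multiply and surrender" joke sort, so keep inputs tiny, but the
# result must always match Python's built-in sorted().
import random

data = [random.randint(0, 100) for _ in range(8)]
expected = sorted(data)
slowsort(data)  # sorts in place, returns None
assert data == expected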
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class lowerCAmelCase__ ( __lowercase , __lowercase ): a__ : List[str] = """pixel_values""" a__ : Any = False a__ : str = TimmBackboneConfig def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ) -> Tuple: requires_backends(self , '''timm''' ) super().__init__(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' ) if hasattr(SCREAMING_SNAKE_CASE__ , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) __lowerCamelCase = getattr(SCREAMING_SNAKE_CASE__ , '''use_pretrained_backbone''' , SCREAMING_SNAKE_CASE__ ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. __lowerCamelCase = config.out_indices if getattr(SCREAMING_SNAKE_CASE__ , '''out_indices''' , SCREAMING_SNAKE_CASE__ ) is not None else (-1,) __lowerCamelCase = timm.create_model( config.backbone , pretrained=SCREAMING_SNAKE_CASE__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
__lowerCamelCase = self._backbone.return_layers __lowerCamelCase = {layer['''module''']: str(SCREAMING_SNAKE_CASE__ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(SCREAMING_SNAKE_CASE__ ) @classmethod def __A ( cls : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig __lowerCamelCase = kwargs.pop('''config''' , TimmBackboneConfig() ) __lowerCamelCase = kwargs.pop('''use_timm_backbone''' , SCREAMING_SNAKE_CASE__ ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) __lowerCamelCase = kwargs.pop('''num_channels''' , config.num_channels ) __lowerCamelCase = kwargs.pop('''features_only''' , config.features_only ) __lowerCamelCase = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) __lowerCamelCase = kwargs.pop('''out_indices''' , config.out_indices ) __lowerCamelCase = TimmBackboneConfig( backbone=SCREAMING_SNAKE_CASE__ , num_channels=SCREAMING_SNAKE_CASE__ , features_only=SCREAMING_SNAKE_CASE__ , use_pretrained_backbone=SCREAMING_SNAKE_CASE__ , out_indices=SCREAMING_SNAKE_CASE__ , ) return super()._from_config(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: pass def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : str ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: __lowerCamelCase = return_dict if return_dict is not None else self.config.use_return_dict __lowerCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCamelCase = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone __lowerCamelCase = self._all_layers __lowerCamelCase = self._backbone(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = self._return_layers __lowerCamelCase = tuple(hidden_states[i] for i in self.out_indices ) else: __lowerCamelCase = self._backbone(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = None __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) if hidden_states is not None else None if not return_dict: __lowerCamelCase = (feature_maps,) if output_hidden_states: __lowerCamelCase = output + (hidden_states,) return output return BackboneOutput(feature_maps=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ , attentions=SCREAMING_SNAKE_CASE__ )
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , 
float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
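# Small sketch of the module-level get_pairs helper defined near the top of
# this tokenizer file: given a word as a tuple of symbols, it returns the set
# of adjacent pairs that bpe() repeatedly ranks and merges (lowest merge rank
# first).
assert get_pairs(("l", "o", "w", "er</w>")) == {("l", "o"), ("o", "w"), ("w", "er</w>")}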
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def __magic_name__ ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any]=0 ) -> Dict: # Format the message. if name is None: __lowerCamelCase = None else: __lowerCamelCase = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' __lowerCamelCase = fmt.format(__lowerCAmelCase ) # Print and recurse (if needed). if isinstance(__lowerCAmelCase , __lowerCAmelCase ): if msg is not None: print(__lowerCAmelCase ) for k in val.keys(): recursive_print(__lowerCAmelCase , val[k] , spaces + 2 ) elif isinstance(__lowerCAmelCase , torch.Tensor ): print(__lowerCAmelCase , ''':''' , val.size() ) else: print(__lowerCAmelCase , ''':''' , __lowerCAmelCase ) def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int ) -> str: # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
__lowerCamelCase = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] __lowerCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:] __lowerCamelCase = param.view(*__lowerCAmelCase ) __lowerCamelCase = param.transpose(0 , 2 ) __lowerCamelCase = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] __lowerCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:] __lowerCamelCase = param.view(*__lowerCAmelCase ) __lowerCamelCase = param.transpose(0 , 1 ).contiguous() __lowerCamelCase = param.view(*__lowerCAmelCase ) return param def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str ) -> List[Any]: # The converted output model. __lowerCamelCase = {} # old versions did not store training args __lowerCamelCase = input_state_dict.get('''args''' , __lowerCAmelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) __lowerCamelCase = ds_args.padded_vocab_size __lowerCamelCase = ds_args.max_position_embeddings __lowerCamelCase = ds_args.hidden_size __lowerCamelCase = ds_args.num_layers __lowerCamelCase = ds_args.num_attention_heads __lowerCamelCase = ds_args.ffn_hidden_size # pprint(config) # The number of heads. __lowerCamelCase = config.n_head # The hidden_size per head. __lowerCamelCase = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): __lowerCamelCase = input_state_dict['''checkpoint_version'''] else: __lowerCamelCase = 0.0 # The model. __lowerCamelCase = input_state_dict['''model'''] # The language model. __lowerCamelCase = model['''language_model'''] # The embeddings. __lowerCamelCase = lm['''embedding'''] # The word embeddings. __lowerCamelCase = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. __lowerCamelCase = word_embeddings[: config.vocab_size, :] __lowerCamelCase = word_embeddings # The position embeddings. __lowerCamelCase = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] __lowerCamelCase = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''' ) # Store the position embeddings. __lowerCamelCase = pos_embeddings # The transformer. __lowerCamelCase = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. __lowerCamelCase = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. __lowerCamelCase = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. __lowerCamelCase = layer_re.match(__lowerCAmelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. __lowerCamelCase = int(m.group(1 ) ) # The name of the operation. __lowerCamelCase = m.group(2 ) # Is it a weight or a bias? __lowerCamelCase = m.group(3 ) # The name of the layer. 
__lowerCamelCase = f'''transformer.h.{layer_idx}''' # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): __lowerCamelCase = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' __lowerCamelCase = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. __lowerCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __lowerCAmelCase , __lowerCAmelCase ) __lowerCamelCase = causal_mask # Insert a "dummy" tensor for masked_bias. __lowerCamelCase = torch.tensor(-1E4 , dtype=torch.floataa ) __lowerCamelCase = masked_bias __lowerCamelCase = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. __lowerCamelCase = out_val.transpose(0 , 1 ).contiguous() # Store. __lowerCamelCase = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": __lowerCamelCase = fix_query_key_value_ordering(__lowerCAmelCase , __lowerCAmelCase , 3 , __lowerCAmelCase , __lowerCAmelCase ) # Store. No change of shape. __lowerCamelCase = out_val # Transpose the weights. elif weight_or_bias == "weight": __lowerCamelCase = megatron_to_transformers[op_name] __lowerCamelCase = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": __lowerCamelCase = megatron_to_transformers[op_name] __lowerCamelCase = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. __lowerCamelCase = transformer['''final_layernorm.weight'''] __lowerCamelCase = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. __lowerCamelCase = word_embeddings # It should be done! return output_state_dict def __magic_name__ ( ) -> Optional[Any]: # Create the argument parser. __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=__lowerCAmelCase , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=__lowerCAmelCase , help='''An optional config json file describing the pre-trained model.''' , ) __lowerCamelCase = parser.parse_args() # Extract the basename. __lowerCamelCase = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: __lowerCamelCase = torch.load(__lowerCAmelCase , map_location='''cpu''' ) else: __lowerCamelCase = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) __lowerCamelCase = input_state_dict.get('''args''' , __lowerCAmelCase ) # Read the config, or default to the model released by NVIDIA. 
if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: __lowerCamelCase = '''gelu_fast''' elif ds_args.openai_gelu: __lowerCamelCase = '''gelu_new''' else: __lowerCamelCase = '''gelu''' else: # in the very early days this used to be "gelu_new" __lowerCamelCase = '''gelu_new''' # Spell out all parameters in case the defaults change. __lowerCamelCase = GPTaConfig( vocab_size=5_0257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=__lowerCAmelCase , summary_activation=__lowerCAmelCase , summary_proj_to_labels=__lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=__lowerCAmelCase , use_cache=__lowerCAmelCase , bos_token_id=5_0256 , eos_token_id=5_0256 , ) else: __lowerCamelCase = GPTaConfig.from_json_file(args.config_file ) __lowerCamelCase = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) __lowerCamelCase = convert_megatron_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__lowerCAmelCase , __lowerCAmelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: __lowerCamelCase = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": __lowerCamelCase = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": __lowerCamelCase = ds_args.tokenizer_name_or_path else: raise ValueError(f'''Unrecognized tokenizer_type {tokenizer_type}''' ) else: __lowerCamelCase = '''gpt2''' __lowerCamelCase = AutoTokenizer.from_pretrained(__lowerCAmelCase ) __lowerCamelCase = type(__lowerCAmelCase ).__name__ __lowerCamelCase = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(__lowerCAmelCase ) # Save tokenizer based on args print(f'''Adding {tokenizer_class} tokenizer files''' ) tokenizer.save_pretrained(__lowerCAmelCase ) # Store the state_dict to file. __lowerCamelCase = os.path.join(__lowerCAmelCase , '''pytorch_model.bin''' ) print(f'''Saving checkpoint to "{output_checkpoint_file}"''' ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
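# Hedged sketch (illustrative shapes only) of the checkpoint_version >= 2.0
# branch of fix_query_key_value_ordering above: Megatron stores the fused QKV
# weight as [num_heads * num_splits * hidden, :]; the conversion views it,
# swaps the heads/splits axes, and flattens back to the transformers layout.
import torch

num_heads, num_splits, hidden, cols = 2, 3, 4, 5
param = torch.randn(num_heads * num_splits * hidden, cols)
out = (
    param.view(num_heads, num_splits, hidden, cols)
    .transpose(0, 1)
    .contiguous()
    .view(num_heads * num_splits * hidden, cols)
)
assert out.shape == param.shape  # same flat shape, reordered head/split interleaving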
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : str = ShapEImgaImgPipeline a__ : Union[str, Any] = ["""image"""] a__ : Optional[int] = ["""image"""] a__ : Union[str, Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] a__ : List[str] = False @property def __A ( self : Dict ) -> Optional[Any]: return 32 @property def __A ( self : Optional[int] ) -> Optional[int]: return 32 @property def __A ( self : Optional[int] ) -> List[Any]: return self.time_input_dim * 4 @property def __A ( self : str ) -> List[Any]: return 8 @property def __A ( self : Optional[Any] ) -> Union[str, Any]: torch.manual_seed(0 ) __lowerCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Union[str, Any] ) -> Union[str, Any]: __lowerCamelCase = CLIPImageProcessor( crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , ) return image_processor @property def __A ( self : Dict ) -> int: torch.manual_seed(0 ) __lowerCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) return model @property def __A ( self : Tuple ) -> Dict: torch.manual_seed(0 ) __lowerCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ ) return model def __A ( self : Optional[int] ) -> List[str]: __lowerCamelCase = self.dummy_prior __lowerCamelCase = self.dummy_image_encoder __lowerCamelCase = self.dummy_image_processor __lowerCamelCase = self.dummy_renderer __lowerCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , ) __lowerCamelCase = { '''prior''': prior, 
'''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int: __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ ) if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def __A ( self : Union[str, Any] ) -> Dict: __lowerCamelCase = '''cpu''' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) __lowerCamelCase = output.images[0] __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCamelCase = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __A ( self : str ) -> Tuple: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = torch_device == '''cpu''' __lowerCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , ) def __A ( self : Dict ) -> Optional[int]: __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 1 __lowerCamelCase = 2 __lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) for key in inputs.keys(): if key in self.batch_params: __lowerCamelCase = batch_size * [inputs[key]] __lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __A ( self : str ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ) -> Union[str, Any]: __lowerCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __lowerCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) __lowerCamelCase = pipe( SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , 
output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )