Dataset columns (name: type, observed range):
- code: string, lengths 87 to 55.2k characters
- code_codestyle: int64, 0 to 349
- style_context: string, lengths 135 to 49.1k characters
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 or 1
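The rows below are raw cell values dumped in column order (code, code_codestyle, style_context, style_context_codestyle, label). A minimal sketch of loading and inspecting such a dataset with the Hugging Face datasets library; the dataset id "user/code-style-pairs" and the label semantics are assumptions, since neither is named in this dump:

from datasets import load_dataset

# "user/code-style-pairs" is a hypothetical id -- the real one is not given here.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:80])          # obfuscated Python source, 87 to 55.2k chars
print(row["code_codestyle"])     # style class id in 0..349
print(row["style_context"][:80]) # a second source snippet serving as style context
print(row["label"])              # 0/1; assumed to mark whether the two cells share a style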
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class lowerCAmelCase_ :
    '''simple docstring'''

    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } ,
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} ,
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } ,
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} ,
    )
    UpperCamelCase_ : bool = field(
        default=UpperCAmelCase_ ,
        metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} ,
    )
    UpperCamelCase_ : str = field(
        default="""main""" ,
        metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,
    )
    UpperCamelCase_ : bool = field(
        default=UpperCAmelCase_ ,
        metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } ,
    )

    def _snake_case ( self : Tuple ) -> List[Any]:
        '''simple docstring'''
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' )


@dataclass
class lowerCAmelCase_ :
    '''simple docstring'''

    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}
    )
    UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ ,
        metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} ,
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} ,
    )
    UpperCamelCase_ : Optional[str] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} ,
    )
    UpperCamelCase_ : bool = field(
        default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )
    UpperCamelCase_ : Optional[int] = field(
        default=5 ,
        metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } ,
    )
    UpperCamelCase_ : Optional[int] = field(
        default=UpperCAmelCase_ ,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        } ,
    )
    UpperCamelCase_ : Optional[int] = field(
        default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} ,
    )
    UpperCamelCase_ : float = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""}
    )
    UpperCamelCase_ : bool = field(
        default=UpperCAmelCase_ ,
        metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } ,
    )

    def _snake_case ( self : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        if self.train_file is not None:
            A: Tuple = self.train_file.split('''.''' )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            A: str = self.validation_file.split('''.''' )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]:
    with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f:
        A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())]
    assert len(__lowercase ) == len(__lowercase )

    A: Optional[int] = {c: dataset[c] for c in dataset.column_names}
    A: Union[str, Any] = refs
    return Dataset.from_dict(__lowercase )


def SCREAMING_SNAKE_CASE( ) -> int:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A , A , A: List[Any] = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    A: Any = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A: Any = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        handlers=[logging.StreamHandler(sys.stdout )] ,
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , __lowercase )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            A: int = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=F"""train[:{data_args.validation_split_percentage}%]""" ,
            )
            A: Dict = load_dataset(
                data_args.dataset_name ,
                data_args.dataset_config_name ,
                split=F"""train[{data_args.validation_split_percentage}%:]""" ,
            )
    else:
        A: Any = {}
        if data_args.train_file is not None:
            A: int = data_args.train_file
        if data_args.validation_file is not None:
            A: Optional[int] = data_args.validation_file
        A: List[str] = data_args.train_file.split('''.''' )[-1]
        if extension == "txt":
            A: int = '''text'''
        A: Any = load_dataset(__lowercase , data_files=__lowercase )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A: Dict = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase )
    elif model_args.model_name_or_path:
        A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase )
    else:
        A: str = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )

    A: Tuple = {
        '''cache_dir''': model_args.cache_dir,
        '''use_fast''': model_args.use_fast_tokenizer,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase )
    elif model_args.model_name_or_path:
        A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
            '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' )

    if model_args.model_name_or_path:
        A: List[Any] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,
            config=__lowercase ,
            cache_dir=model_args.cache_dir ,
            revision=model_args.model_revision ,
            use_auth_token=True if model_args.use_auth_token else None ,
        )
    else:
        logger.info('''Training new model from scratch''' )
        A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase )

    model.resize_token_embeddings(len(__lowercase ) )

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        A: int = datasets['''train'''].column_names
    else:
        A: str = datasets['''validation'''].column_names
    A: Tuple = '''text''' if '''text''' in column_names else column_names[0]

    A: List[str] = '''max_length''' if data_args.pad_to_max_length else False

    def tokenize_function(__lowercase ):
        # Remove empty lines
        A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()]
        return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length )

    A: str = datasets.map(
        __lowercase ,
        batched=__lowercase ,
        num_proc=data_args.preprocessing_num_workers ,
        remove_columns=[text_column_name] ,
        load_from_cache_file=not data_args.overwrite_cache ,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        A: Dict = add_chinese_references(
            tokenized_datasets['''validation'''] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        A: List[Any] = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    A: Optional[int] = Trainer(
        model=__lowercase ,
        args=__lowercase ,
        train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None ,
        eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None ,
        tokenizer=__lowercase ,
        data_collator=__lowercase ,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            A: Optional[int] = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            A: str = model_args.model_name_or_path
        else:
            A: List[str] = None
        A: str = trainer.train(resume_from_checkpoint=__lowercase )
        trainer.save_model()  # Saves the tokenizer too for easy upload

        A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' )
        if trainer.is_world_process_zero():
            with open(__lowercase , '''w''' ) as writer:
                logger.info('''***** Train results *****''' )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )

    # Evaluation
    A: Optional[int] = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        A: Optional[Any] = trainer.evaluate()

        A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
        A: Dict = perplexity

        A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' )
        if trainer.is_world_process_zero():
            with open(__lowercase , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in sorted(results.items() ):
                    logger.info(F""" {key} = {value}""" )
                    writer.write(F"""{key} = {value}\n""" )

    return results


def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
319
'''simple docstring'''
import fire

from utils import calculate_rouge, save_json


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any:
    A: Any = [x.strip() for x in open(__lowercase ).readlines()]
    A: Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )]
    A: Union[str, Any] = calculate_rouge(__lowercase , __lowercase , **__lowercase )
    if save_path is not None:
        save_json(__lowercase , __lowercase , indent=__lowercase )
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
319
1
'''simple docstring'''
import torch

from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : Any = (KDPMaDiscreteScheduler,)
    UpperCamelCase_ : str = 10

    def _snake_case ( self : Optional[Any] , **SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
        '''simple docstring'''
        A: str = {
            '''num_train_timesteps''': 11_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }
        config.update(**SCREAMING_SNAKE_CASE_ )
        return config

    def _snake_case ( self : Union[str, Any] ) -> Any:
        '''simple docstring'''
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : int ) -> Optional[Any]:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ , beta_end=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : str ) -> Optional[int]:
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : List[Any] ) -> int:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : int ) -> Tuple:
        '''simple docstring'''
        A: Union[str, Any] = self.scheduler_classes[0]
        A: int = self.get_scheduler_config(prediction_type='''v_prediction''' )
        A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )

        scheduler.set_timesteps(self.num_inference_steps )

        A: Union[str, Any] = self.dummy_model()
        A: Any = self.dummy_sample_deter * scheduler.init_noise_sigma
        A: str = sample.to(SCREAMING_SNAKE_CASE_ )

        for i, t in enumerate(scheduler.timesteps ):
            A: Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: Dict = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            A: Optional[int] = output.prev_sample

        A: int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
            assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
            assert abs(result_mean.item() - 0.0002 ) < 1E-3

    def _snake_case ( self : int ) -> Dict:
        '''simple docstring'''
        if torch_device == "mps":
            return
        A: List[Any] = self.scheduler_classes[0]
        A: int = self.get_scheduler_config()
        A: Dict = scheduler_class(**SCREAMING_SNAKE_CASE_ )

        scheduler.set_timesteps(self.num_inference_steps )

        A: str = self.dummy_model()
        A: Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
        A: Dict = sample.to(SCREAMING_SNAKE_CASE_ )

        for i, t in enumerate(scheduler.timesteps ):
            A: Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            A: Tuple = output.prev_sample

        A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3

    def _snake_case ( self : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        if torch_device == "mps":
            return
        A: str = self.scheduler_classes[0]
        A: Any = self.get_scheduler_config()
        A: int = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        scheduler.set_timesteps(self.num_inference_steps , device=SCREAMING_SNAKE_CASE_ )

        A: str = self.dummy_model()
        A: int = self.dummy_sample_deter.to(SCREAMING_SNAKE_CASE_ ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            A: List[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: int = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

            A: Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            A: Union[str, Any] = output.prev_sample

        A: List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A: str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )

        if str(SCREAMING_SNAKE_CASE_ ).startswith('''cpu''' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
319
'''simple docstring'''


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = 0 ) -> list:
    A: Dict = length or len(__lowercase )
    A: Dict = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            A , A: Tuple = list_data[i + 1], list_data[i]
            A: Union[str, Any] = True

    return list_data if not swapped else bubble_sort(__lowercase , length - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


UpperCamelCase = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''SpeechEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = ['''FlaxSpeechEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
319
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = [
    ('''bert.bert''', '''visual_bert'''),
    ('''bert.cls''', '''cls'''),
    ('''bert.classifier''', '''cls'''),
    ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
    ('''position_embeddings_visual''', '''visual_position_embeddings'''),
    ('''projection''', '''visual_projection'''),
]

UpperCamelCase = [
    '''nlvr2_coco_pre_trained.th''',
    '''nlvr2_fine_tuned.th''',
    '''nlvr2_pre_trained.th''',
    '''vcr_coco_pre_train.th''',
    '''vcr_fine_tune.th''',
    '''vcr_pre_train.th''',
    '''vqa_coco_pre_trained.th''',
    '''vqa_fine_tuned.th''',
    '''vqa_pre_trained.th''',
]


def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]:
    A: List[Any] = torch.load(__lowercase , map_location='''cpu''' )
    return sd


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]:
    A: Tuple = OrderedDict()
    A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        A: int = key
        for name_pair in rename_keys_prefix:
            A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] )
        A: Union[str, Any] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            A: int = new_d['''cls.predictions.bias''']
    return new_d


@torch.no_grad()
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict:
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        A: Optional[Any] = '''pretraining'''
        if "vcr" in checkpoint_path:
            A: Optional[int] = {'''visual_embedding_dim''': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            A: Tuple = {'''visual_embedding_dim''': 1_0_2_4}
        else:
            raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 5_1_2}
            A: List[str] = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            A: List[str] = {'''visual_embedding_dim''': 2_0_4_8}
            A: Optional[int] = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
            A: Union[str, Any] = '''vqa'''
        elif "nlvr" in checkpoint_path:
            A: Optional[int] = {
                '''visual_embedding_dim''': 1_0_2_4,
                '''num_labels''': 2,
            }
            A: str = '''nlvr'''

    A: Union[str, Any] = VisualBertConfig(**__lowercase )

    # Load State Dict
    A: Union[str, Any] = load_state_dict(__lowercase )

    A: str = get_new_dict(__lowercase , __lowercase )

    if model_type == "pretraining":
        A: Optional[Any] = VisualBertForPreTraining(__lowercase )
    elif model_type == "vqa":
        A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase )
    elif model_type == "nlvr":
        A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase )
    elif model_type == "multichoice":
        A: Any = VisualBertForMultipleChoice(__lowercase )

    model.load_state_dict(__lowercase )
    # Save Checkpoints
    Path(__lowercase ).mkdir(exist_ok=__lowercase )
    model.save_pretrained(__lowercase )


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')

    UpperCamelCase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
319
1
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

UpperCamelCase = {
    '''vocab_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
        '''allenai/longformer-large-4096''': (
            '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-finetuned-triviaqa''': (
            '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-base-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
        '''allenai/longformer-large-4096-extra.pos.embd.only''': (
            '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
        ),
    },
}

UpperCamelCase = {
    '''allenai/longformer-base-4096''': 4096,
    '''allenai/longformer-large-4096''': 4096,
    '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
    '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
    '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def SCREAMING_SNAKE_CASE( ) -> Dict:
    A: Dict = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    A: Union[str, Any] = bs[:]
    A: List[str] = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(__lowercase )
            cs.append(2**8 + n )
            n += 1
    A: List[Any] = [chr(__lowercase ) for n in cs]
    return dict(zip(__lowercase , __lowercase ) )


def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]:
    A: Optional[Any] = set()
    A: Tuple = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        A: List[Any] = char
    return pairs


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    UpperCamelCase_ : int = VOCAB_FILES_NAMES
    UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""]

    def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]:
        '''simple docstring'''
        A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token
        A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token
        A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token
        A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token
        A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token
        A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token

        super().__init__(
            errors=SCREAMING_SNAKE_CASE_ ,
            bos_token=SCREAMING_SNAKE_CASE_ ,
            eos_token=SCREAMING_SNAKE_CASE_ ,
            unk_token=SCREAMING_SNAKE_CASE_ ,
            sep_token=SCREAMING_SNAKE_CASE_ ,
            cls_token=SCREAMING_SNAKE_CASE_ ,
            pad_token=SCREAMING_SNAKE_CASE_ ,
            mask_token=SCREAMING_SNAKE_CASE_ ,
            add_prefix_space=SCREAMING_SNAKE_CASE_ ,
            **SCREAMING_SNAKE_CASE_ ,
        )

        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
            A: str = json.load(SCREAMING_SNAKE_CASE_ )
        A: str = {v: k for k, v in self.encoder.items()}
        A: Union[str, Any] = errors  # how to handle errors in decoding
        A: Optional[int] = bytes_to_unicode()
        A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
            A: int = merges_handle.read().split('''\n''' )[1:-1]
        A: str = [tuple(merge.split() ) for merge in bpe_merges]
        A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        A: Union[str, Any] = {}
        A: Tuple = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    def _snake_case ( self : int ) -> List[Any]:
        '''simple docstring'''
        return len(self.encoder )

    def _snake_case ( self : Optional[Any] ) -> int:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )

    def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        A: str = tuple(SCREAMING_SNAKE_CASE_ )
        A: str = get_pairs(SCREAMING_SNAKE_CASE_ )

        if not pairs:
            return token

        while True:
            A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            A , A: Optional[Any] = bigram
            A: Tuple = []
            A: List[Any] = 0
            while i < len(SCREAMING_SNAKE_CASE_ ):
                try:
                    A: Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    A: int = j

                if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ )
            A: Any = new_word
            if len(SCREAMING_SNAKE_CASE_ ) == 1:
                break
            else:
                A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ )
        A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ )
        A: str = word
        return word

    def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        A: Dict = []
        for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ):
            A: Tuple = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) )
        return bpe_tokens

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
        '''simple docstring'''
        return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
        '''simple docstring'''
        return self.decoder.get(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple:
        '''simple docstring'''
        A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ )
        A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A: Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        A: int = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )

        A: Any = 0
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    A: Union[str, Any] = token_index
                writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
                index += 1

        return vocab_file, merge_file

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A: int = [self.cls_token_id]
        A: str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )

        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]

    def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        A: Dict = [self.sep_token_id]
        A: Optional[Any] = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
        '''simple docstring'''
        A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()):
            A: List[Any] = ''' ''' + text
        return (text, kwargs)
319
'''simple docstring'''
from itertools import permutations


def SCREAMING_SNAKE_CASE( __lowercase ) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False

    A: int = [7, 1_1, 1_3, 1_7]
    for i, test in enumerate(__lowercase ):
        if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0:
            return False
    return True


def SCREAMING_SNAKE_CASE( __lowercase = 1_0 ) -> int:
    return sum(
        int(''''''.join(map(__lowercase , __lowercase ) ) )
        for num in permutations(range(__lowercase ) )
        if is_substring_divisible(__lowercase ) )


if __name__ == "__main__":
    print(f'{solution() = }')
319
1
'''simple docstring'''
from collections import Counter
from timeit import timeit


def SCREAMING_SNAKE_CASE( __lowercase = "" , ) -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2


def SCREAMING_SNAKE_CASE( __lowercase = "" ) -> bool:
    if len(__lowercase ) == 0:
        return True
    A: Any = input_str.replace(''' ''' , '''''' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    A: dict[str, int] = {}

    for character in lower_case_input_str:
        A: List[Any] = character_freq_dict.get(__lowercase , 0 ) + 1
    A: Optional[Any] = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def SCREAMING_SNAKE_CASE( __lowercase = "" ) -> None:
    print('''\nFor string = ''' , __lowercase , ''':''' )
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''' ,
        '''\tans =''' ,
        can_string_be_rearranged_as_palindrome_counter(__lowercase ) ,
        '''\ttime =''' ,
        timeit(
            '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' ,
            setup='''import __main__ as z''' ,
        ) ,
        '''seconds''' ,
    )
    print(
        '''> can_string_be_rearranged_as_palindrome()''' ,
        '''\tans =''' ,
        can_string_be_rearranged_as_palindrome(__lowercase ) ,
        '''\ttime =''' ,
        timeit(
            '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' ,
            setup='''import __main__ as z''' ,
        ) ,
        '''seconds''' ,
    )


if __name__ == "__main__":
    UpperCamelCase = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: ''' ).strip()
    benchmark(check_str)
    UpperCamelCase = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
319
319
1
'''simple docstring'''
from copy import deepcopy


class lowerCAmelCase_ :
    '''simple docstring'''

    def __init__( self : Any , SCREAMING_SNAKE_CASE_ : list[int] | None = None , SCREAMING_SNAKE_CASE_ : int | None = None ) -> None:
        '''simple docstring'''
        if arr is None and size is not None:
            A: Tuple = size
            A: Dict = [0] * size
        elif arr is not None:
            self.init(SCREAMING_SNAKE_CASE_ )
        else:
            raise ValueError('''Either arr or size must be specified''' )

    def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : list[int] ) -> None:
        '''simple docstring'''
        A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
        A: List[str] = deepcopy(SCREAMING_SNAKE_CASE_ )
        for i in range(1 , self.size ):
            A: Any = self.next_(SCREAMING_SNAKE_CASE_ )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def _snake_case ( self : Optional[int] ) -> list[int]:
        '''simple docstring'''
        A: Optional[Any] = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            A: int = self.next_(SCREAMING_SNAKE_CASE_ )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def _snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        return index + (index & (-index))

    @staticmethod
    def _snake_case ( SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        return index - (index & (-index))

    def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
        '''simple docstring'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            A: Dict = self.next_(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> None:
        '''simple docstring'''
        self.add(SCREAMING_SNAKE_CASE_ , value - self.get(SCREAMING_SNAKE_CASE_ ) )

    def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        if right == 0:
            return 0
        A: Any = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            A: Dict = self.prev(SCREAMING_SNAKE_CASE_ )
        return result

    def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        return self.prefix(SCREAMING_SNAKE_CASE_ ) - self.prefix(SCREAMING_SNAKE_CASE_ )

    def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        return self.query(SCREAMING_SNAKE_CASE_ , index + 1 )

    def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ) -> int:
        '''simple docstring'''
        value -= self.tree[0]
        if value < 0:
            return -1

        A: Dict = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        A: Optional[Any] = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
'''simple docstring'''


def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
    if not isinstance(__lowercase , __lowercase ):
        raise TypeError('''only integers accepted as input''' )
    else:
        A: str = str(abs(__lowercase ) )
        A: int = [list(__lowercase ) for char in range(len(__lowercase ) )]
        for index in range(len(__lowercase ) ):
            num_transpositions[index].pop(__lowercase )
        return max(
            int(''''''.join(list(__lowercase ) ) ) for transposition in num_transpositions )


if __name__ == "__main__":
    __import__('''doctest''').testmod()
319
1
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class lowerCAmelCase_ ( UpperCAmelCase_ ):
    '''simple docstring'''

    @staticmethod
    @abstractmethod
    def _snake_case ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Union[str, Any]:
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def _snake_case ( self : Dict ) -> Optional[int]:
        '''simple docstring'''
        raise NotImplementedError()
319
'''simple docstring'''
from __future__ import annotations

import math


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
    if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2:
        raise Exception('''Matrices are not 2x2''' )
    A: str = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(__lowercase ) )
    ]


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(__lowercase ) )
    ]


def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]:
    if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''' )

    A: Union[str, Any] = len(__lowercase )
    A: str = matrix_length // 2

    A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )]
    A: Optional[Any] = [
        [a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase )
    ]
    A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )]
    A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )]

    return top_left, top_right, bot_left, bot_right


def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]:
    return len(__lowercase ), len(matrix[0] )


def SCREAMING_SNAKE_CASE( __lowercase ) -> None:
    print('''\n'''.join(str(__lowercase ) for line in matrix ) )


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
    if matrix_dimensions(__lowercase ) == (2, 2):
        return default_matrix_multiplication(__lowercase , __lowercase )

    A , A , A , A: Union[str, Any] = split_matrix(__lowercase )
    A , A , A , A: List[Any] = split_matrix(__lowercase )

    A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
    A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
    A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase )
    A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) )
    A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
    A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )
    A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) )

    A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )
    A: Any = matrix_addition(__lowercase , __lowercase )
    A: List[Any] = matrix_addition(__lowercase , __lowercase )
    A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase )

    # construct the new matrix from our 4 quadrants
    A: Union[str, Any] = []
    for i in range(len(__lowercase ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(__lowercase ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list:
    if matrix_dimensions(__lowercase )[1] != matrix_dimensions(__lowercase )[0]:
        A: int = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F"""Matrix A: {matrixa}\n"""
            F"""Matrix B: {matrixa}"""
        )
        raise Exception(__lowercase )
    A: str = matrix_dimensions(__lowercase )
    A: str = matrix_dimensions(__lowercase )

    if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
        return [matrixa, matrixa]

    A: Union[str, Any] = max(*__lowercase , *__lowercase )
    A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) )
    A: List[Any] = matrixa
    A: Tuple = matrixa

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , __lowercase ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , __lowercase ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , __lowercase ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )

    A: Any = actual_strassen(__lowercase , __lowercase )

    # Removing the additional zeros
    for i in range(0 , __lowercase ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , __lowercase ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    UpperCamelCase = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixa))
319
1
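The record above is Strassen matrix multiplication: inputs are zero-padded up to the next power of two, split into quadrants, and recombined from seven recursive products instead of eight. A small sketch of the seven-product identity on plain 2x2 scalars (helper names are mine), checked against the naive product:

def naive_2x2(a, b):
    return [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]


def strassen_2x2(a, b):
    # The seven Strassen products (scalars here; sub-blocks in the real algorithm).
    p1 = a[0][0] * (b[0][1] - b[1][1])
    p2 = (a[0][0] + a[0][1]) * b[1][1]
    p3 = (a[1][0] + a[1][1]) * b[0][0]
    p4 = a[1][1] * (b[1][0] - b[0][0])
    p5 = (a[0][0] + a[1][1]) * (b[0][0] + b[1][1])
    p6 = (a[0][1] - a[1][1]) * (b[1][0] + b[1][1])
    p7 = (a[0][0] - a[1][0]) * (b[0][0] + b[0][1])
    return [
        [p5 + p4 - p2 + p6, p1 + p2],
        [p3 + p4, p1 + p5 - p3 - p7],
    ]


a = [[2, 3], [6, 4]]
b = [[0, 2], [16, 2]]
assert strassen_2x2(a, b) == naive_2x2(a, b)

In the record, the same combinations are applied to quadrant matrices via matrix_addition and matrix_subtraction, and the padding rows and columns are popped off the result at the end.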
'''simple docstring'''
import re


def SCREAMING_SNAKE_CASE( __lowercase ) -> str:
    if len(re.findall('''[ATCG]''' , __lowercase ) ) != len(__lowercase ):
        raise ValueError('''Invalid Strand''' )

    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
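The record above returns the complement of a DNA strand via str.translate after validating the alphabet with a regex. The same idea as a self-contained sketch (the function name is mine; the record's parameter naming is inconsistent because of the dataset's renaming scheme):

import re


def dna_complement(strand: str) -> str:
    # Reject anything that is not a valid nucleotide sequence.
    if not re.fullmatch(r"[ATCG]*", strand):
        raise ValueError("Invalid Strand")
    # A<->T and C<->G are swapped in a single translation pass.
    return strand.translate(str.maketrans("ATCG", "TAGC"))


assert dna_complement("ATCG") == "TAGC"
assert dna_complement("GGTCA") == "CCAGT"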
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = 2 @register_to_config def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = sigma_max # setable values A: int = None A: np.IntTensor = None A: torch.FloatTensor = None # sigma(t_i) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' A: List[Any] = num_inference_steps A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) A: str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A: List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) A: Optional[Any] = sigma + gamma * sigma A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' A: Union[str, Any] = sample_hat + sigma_hat * model_output A: str = (sample_hat - pred_original_sample) / sigma_hat A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple 
docstring''' A: int = sample_prev + sigma_prev * model_output A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' raise NotImplementedError()
319
1
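The record above implements the Karras et al. stochastic scheduler: set_timesteps builds a geometric interpolation between sigma_max and sigma_min, and the noise-injection step "churns" the sample by raising sigma to sigma_hat and adding just enough fresh noise to match the variance gap. A NumPy sketch of those two pieces using the record's default config values (variable names are mine):

import numpy as np

# Defaults from the record's __init__: sigma_min, sigma_max, s_noise, s_churn, s_min, s_max.
sigma_min, sigma_max = 0.02, 100.0
s_noise, s_churn = 1.007, 80.0
s_min, s_max = 0.05, 50.0
num_steps = 50

# Geometric schedule, mirroring set_timesteps.
i = np.arange(num_steps)
schedule = sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))

# Churn step: only applied while sigma lies inside [s_min, s_max].
rng = np.random.default_rng(0)
sample = rng.standard_normal(4)
sigma = schedule[30]
gamma = min(s_churn / num_steps, 2**0.5 - 1) if s_min <= sigma <= s_max else 0.0
eps = s_noise * rng.standard_normal(4)  # eps ~ N(0, s_noise^2 * I)
sigma_hat = sigma + gamma * sigma
sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
print(sigma, sigma_hat)

The later step and step_correct methods then follow a Heun-style predictor-corrector update from sigma_hat down to sigma_prev.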
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate UpperCamelCase = trt.Logger(trt.Logger.WARNING) UpperCamelCase = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--onnx_model_path''', default=None, type=str, required=True, help='''Path to ONNX model: ''', ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model checkpoints and predictions will be written.''', ) # Other parameters parser.add_argument( '''--tokenizer_name''', default='''''', type=str, required=True, help='''Pretrained tokenizer name or path if not the same as model_name''', ) parser.add_argument( '''--version_2_with_negative''', action='''store_true''', help='''If true, the SQuAD examples contain some that do not have an answer.''', ) parser.add_argument( '''--null_score_diff_threshold''', type=float, default=0.0, help='''If null_score - best_non_null is greater than the threshold predict null.''', ) parser.add_argument( '''--max_seq_length''', default=384, type=int, help=( '''The maximum total input sequence length after WordPiece tokenization. Sequences ''' '''longer than this will be truncated, and sequences shorter than this will be padded.''' ), ) parser.add_argument( '''--doc_stride''', default=128, type=int, help='''When splitting up a long document into chunks, how much stride to take between chunks.''', ) parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''') parser.add_argument( '''--n_best_size''', default=20, type=int, help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''', ) parser.add_argument( '''--max_answer_length''', default=30, type=int, help=( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ), ) parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''') parser.add_argument( '''--dataset_name''', type=str, default=None, required=True, help='''The name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--dataset_config_name''', type=str, default=None, help='''The configuration name of the dataset to use (via the datasets library).''', ) parser.add_argument( '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.''' ) parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''') parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision instead of 32-bit''', ) parser.add_argument( '''--int8''', action='''store_true''', help='''Whether to use INT8''', ) UpperCamelCase = parser.parse_args() if args.tokenizer_name: UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) logger.info('''Training/evaluation parameters %s''', args) UpperCamelCase = args.per_device_eval_batch_size UpperCamelCase = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties UpperCamelCase = True UpperCamelCase = '''temp_engine/bert-fp32.engine''' if args.fpaa: UpperCamelCase = '''temp_engine/bert-fp16.engine''' if args.inta: UpperCamelCase = '''temp_engine/bert-int8.engine''' # import ONNX file if not os.path.exists('''temp_engine'''): os.makedirs('''temp_engine''') UpperCamelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, '''rb''') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network UpperCamelCase = [network.get_input(i) for i in range(network.num_inputs)] UpperCamelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: UpperCamelCase = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) UpperCamelCase = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) UpperCamelCase = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, '''wb''') as f: f.write(engine.serialize()) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]: A: Any = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) A: List[str] = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) A: Optional[Any] = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , 
__lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __lowercase ) # start time A: Tuple = time.time() # Run inference context.execute_async( bindings=[int(__lowercase ) for d_inp in d_inputs] + [int(__lowercase ), int(__lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(__lowercase , __lowercase , __lowercase ) cuda.memcpy_dtoh_async(__lowercase , __lowercase , __lowercase ) # Synchronize the stream and take time stream.synchronize() # end time A: Optional[Any] = time.time() A: Dict = end_time - start_time A: List[str] = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. UpperCamelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCamelCase = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('''Evaluation requires a dataset name''') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. UpperCamelCase = raw_datasets['''validation'''].column_names UpperCamelCase = '''question''' if '''question''' in column_names else column_names[0] UpperCamelCase = '''context''' if '''context''' in column_names else column_names[1] UpperCamelCase = '''answers''' if '''answers''' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). UpperCamelCase = tokenizer.padding_side == '''right''' if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' 
) UpperCamelCase = min(args.max_seq_length, tokenizer.model_max_length) def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace A: Tuple = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. A: Union[str, Any] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=__lowercase , stride=args.doc_stride , return_overflowing_tokens=__lowercase , return_offsets_mapping=__lowercase , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. A: Optional[int] = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. A: List[Any] = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). A: List[str] = tokenized_examples.sequence_ids(__lowercase ) A: List[Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. A: int = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. A: Union[str, Any] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples UpperCamelCase = raw_datasets['''validation'''] # Validation Feature Creation UpperCamelCase = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) UpperCamelCase = default_data_collator UpperCamelCase = eval_dataset.remove_columns(['''example_id''', '''offset_mapping''']) UpperCamelCase = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase="eval" ) -> Tuple: # Post-processing: we match the start logits and end logits to answers in the original context. A: str = postprocess_qa_predictions( examples=__lowercase , features=__lowercase , predictions=__lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__lowercase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: A: Tuple = [ {'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items() ] else: A: Union[str, Any] = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()] A: str = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=__lowercase , label_ids=__lowercase ) UpperCamelCase = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''') # Evaluation! logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path) with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def SCREAMING_SNAKE_CASE( __lowercase ) -> int: return trt.volume(engine.get_binding_shape(__lowercase ) ) * engine.get_binding_dtype(__lowercase ).itemsize # Allocate device memory for inputs and outputs. UpperCamelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer UpperCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) UpperCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) UpperCamelCase = cuda.mem_alloc(h_outputa.nbytes) UpperCamelCase = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. UpperCamelCase = cuda.Stream() # Evaluation logger.info('''***** Running Evaluation *****''') logger.info(f' Num examples = {len(eval_dataset)}') logger.info(f' Batch size = {args.per_device_eval_batch_size}') UpperCamelCase = 0.0 UpperCamelCase = 0 UpperCamelCase = timeit.default_timer() UpperCamelCase = None for step, batch in enumerate(eval_dataloader): UpperCamelCase , UpperCamelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 UpperCamelCase , UpperCamelCase = outputs UpperCamelCase = torch.tensor(start_logits) UpperCamelCase = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered UpperCamelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) UpperCamelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) UpperCamelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) UpperCamelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: UpperCamelCase = nested_truncate(all_preds, len(eval_dataset)) UpperCamelCase = timeit.default_timer() - start_time logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter)) logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000)) logger.info('''Total Number of Inference = %d''', niter) UpperCamelCase = post_processing_function(eval_examples, eval_dataset, all_preds) UpperCamelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'Evaluation metrics: {eval_metric}')
319
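The record above is a TensorRT SQuAD evaluation script; the part most worth isolating is prepare_validation_features, which turns one long example into several overlapping windows via the fast tokenizer's overflow support. A standalone sketch of that call (the checkpoint name is my assumption; the kwargs mirror the record):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)

question = "   What does stride control?"  # leading whitespace is stripped first
context = "In windowed tokenization, stride is the overlap between windows. " * 40

encoded = tokenizer(
    question.lstrip(),
    context,
    truncation="only_second",        # only the context side is truncated
    max_length=128,
    stride=32,                       # overlap between consecutive windows
    return_overflowing_tokens=True,  # one example -> several features
    return_offsets_mapping=True,     # map tokens back to context characters
    padding="max_length",
)
# Each feature remembers which original example it came from.
print(len(encoded["input_ids"]), encoded["overflow_to_sample_mapping"])

The record additionally nulls out offset_mapping entries that fall outside the context, so post-processing can tell question tokens from context tokens.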
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) UpperCamelCase_ : Optional[str] = field( 
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' if self.train_file is not None: A: Tuple = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A: str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]: with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f: A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())] assert len(__lowercase ) == len(__lowercase ) A: Optional[int] = {c: dataset[c] for c in dataset.column_names} A: Union[str, Any] = refs return Dataset.from_dict(__lowercase ) def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: List[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A: Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A: int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) A: Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: A: Any = {} if data_args.train_file is not None: A: int = data_args.train_file if data_args.validation_file is not None: A: Optional[int] = data_args.validation_file A: List[str] = data_args.train_file.split('''.''' )[-1] if extension == "txt": A: int = '''text''' A: Any = load_dataset(__lowercase , data_files=__lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A: Dict = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase ) elif model_args.model_name_or_path: A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: A: str = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) A: Tuple = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase ) elif model_args.model_name_or_path: A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: A: List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase ) model.resize_token_embeddings(len(__lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: A: int = datasets['''train'''].column_names else: A: str = datasets['''validation'''].column_names A: Tuple = '''text''' if '''text''' in column_names else column_names[0] A: List[str] = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(__lowercase ): # Remove empty lines A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length ) A: str = datasets.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A: Dict = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A: List[Any] = False # Data collator # This one will take care of randomly masking the tokens. 
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A: Optional[int] = Trainer( model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: A: Optional[int] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A: str = model_args.model_name_or_path else: A: List[str] = None A: str = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation A: Optional[int] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A: Optional[Any] = trainer.evaluate() A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) A: Dict = perplexity A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
319
1
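The record above fine-tunes a masked LM with whole-word masking: tokenized lines go through DataCollatorForWholeWordMask, which selects whole words (optionally guided by the Chinese ref files) and masks all of their subword pieces together. A minimal sketch of the collator in isolation (the checkpoint name is my assumption):

from transformers import AutoTokenizer, DataCollatorForWholeWordMask

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

features = [
    {"input_ids": tokenizer("whole word masking masks subwords together")["input_ids"]},
    {"input_ids": tokenizer("a second shorter example")["input_ids"]},
]
batch = collator(features)
# input_ids now contains [MASK] spans covering whole words; labels is -100
# everywhere except the masked positions.
print(batch["input_ids"].shape, (batch["labels"] != -100).sum())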
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
    # Return True if there is node that has not iterated.
    A: int = [False] * len(__lowercase )
    A: str = []
    queue.append(__lowercase )
    A: Dict = True

    while queue:
        A: Tuple = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(__lowercase )
                A: List[Any] = True
                A: List[Any] = u

    return visited[t]


def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]:
    # This array is filled by BFS and to store path
    A: Dict = [-1] * (len(__lowercase ))
    A: Tuple = 0

    while bfs(__lowercase , __lowercase , __lowercase , __lowercase ):
        A: str = float('''Inf''' )
        A: List[str] = sink

        while s != source:
            # Find the minimum value in select path
            A: List[Any] = min(__lowercase , graph[parent[s]][s] )
            A: Union[str, Any] = parent[s]

        max_flow += path_flow
        A: List[str] = sink

        while v != source:
            A: Any = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            A: str = parent[v]

    return max_flow


UpperCamelCase = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

UpperCamelCase , UpperCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
319
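The record above is Ford-Fulkerson with BFS path selection (the Edmonds-Karp variant) on a residual adjacency matrix. A compact self-contained version with descriptive names (mine), checked on a small network whose minimum cut is 5:

from collections import deque


def max_flow(graph, source, sink):
    n = len(graph)
    flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        q = deque([source])
        while q and parent[sink] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[sink] == -1:
            return flow  # no augmenting path left
        # Bottleneck capacity along the path, then residual updates.
        bottleneck, v = float("inf"), sink
        while v != source:
            bottleneck = min(bottleneck, graph[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= bottleneck
            graph[v][u] += bottleneck
            v = u
        flow += bottleneck


g = [[0, 3, 2, 0], [0, 0, 1, 2], [0, 0, 0, 3], [0, 0, 0, 0]]
assert max_flow(g, 0, 3) == 5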
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
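

if __name__ == "__main__":
    # Standalone sketch mirroring the parametrized test above: `get_imports` statically
    # collects import names from a source file while skipping imports guarded by
    # try/except, so only "os" is expected to be reported for the try-import cases.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as demo_file:
        demo_file.write(TOP_LEVEL_TRY_IMPORT)
    print(get_imports(demo_file.name))  # expected: ['os'] -- `bar` is inside try/except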
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}


@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
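

if __name__ == "__main__":
    # Quick illustration of the `update_from_string` behavior exercised in
    # `test_config_from_string` above: values are parsed according to the type of the
    # existing attribute (int, float, bool, str). The values here are arbitrary.
    demo_config = GPT2Config()
    demo_config.update_from_string("n_embd=100,resid_pdrop=0.3,scale_attn_weights=false,summary_type=mean")
    print(demo_config.n_embd, demo_config.resid_pdrop, demo_config.scale_attn_weights, demo_config.summary_type)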
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    # NOTE: the destination state-dict keys below were dropped in the garbled source and
    # are reconstructed from the standard ViLT/ViT conversion pattern.
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
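

# Usage sketch (illustration only, not part of the class above): with a mono 16 kHz
# waveform the extractor pads/truncates to `n_samples` (30 s) and returns log-Mel
# features of shape (batch, feature_size, nb_max_frames) -- (1, 80, 3000) by default.
#
#   extractor = WhisperFeatureExtractor()
#   features = extractor(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np")
#   features["input_features"].shape  # (1, 80, 3000)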
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
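

# Example invocation (a sketch; the script and folder names are placeholders):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model
#
# The flow above instantiates the Keras reference model, maps every TF variable onto
# the HF state dict via `rename_keys`/`replace_params`, and asserts the two models
# agree within 1e-3 on a COCO test image before anything is saved or pushed.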
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
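

# Launch sketch (assumes `accelerate` is configured; the script name is this file):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16
#
# With N accumulation steps, `accelerator.accumulate(model)` skips the gradient sync
# and optimizer step on all but every N-th batch, emulating an N-times larger
# effective batch size without extra memory.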
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
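# --- Added sketch (an approximation, not `_LazyModule`'s actual code): the
# `_import_structure` dict above lets `_LazyModule` defer importing heavy
# submodules such as `modeling_focalnet` until one of their attributes is
# first accessed. The same idea, using only the standard library:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # imported only now, on first attribute access
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next lookup skips __getattr__
        return value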
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = Dict[str, Any] UpperCamelCase = List[Prediction] @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Any = {} if "threshold" in kwargs: A: List[Any] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: int = load_image(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = torch.IntTensor([[image.height, image.width]] ) A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) A: Any = target_size return inputs def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Tuple = model_inputs.pop('''target_size''' ) A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ ) A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: A: Dict = model_inputs['''bbox'''] return model_outputs def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A , A: Union[str, Any] = target_size[0].tolist() def unnormalize(SCREAMING_SNAKE_CASE_ : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] A: Dict = ['''score''', '''label''', '''box'''] A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = raw_annotations[0] A: List[Any] = raw_annotation['''scores'''] A: List[Any] = raw_annotation['''labels'''] A: int = raw_annotation['''boxes'''] A: Any = scores.tolist() A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels] A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A: Tuple = ['''score''', '''label''', '''box'''] A: str = [ dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) A , A , A , A: str = box.int().tolist() A: str = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
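# --- Added usage sketch: the class above is what backs
# `pipeline("object-detection")`. A typical call (the image path is a
# placeholder, and the default checkpoint download is assumed):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection")  # loads the library's default detection checkpoint
    for pred in detector("cats.png", threshold=0.9):
        # each prediction: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
        print(pred["label"], round(pred["score"], 3), pred["box"])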
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, e.g. 27 -> True, 4 -> False."""
    # Round the float cube root before checking: 27 ** (1 / 3) evaluates to
    # 3.0000000000000004, so cubing the raw root would wrongly return False.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
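# --- Added alternative (a sketch, not part of the original script): for very
# large integers even the rounded float cube root can lose precision, so a
# pure-integer binary search is the safer check:
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # a negative integer is a cube iff its absolute value is
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            left = mid + 1
        else:
            right = mid - 1
    return False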
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
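# --- Added usage sketch: instantiating the config defined above directly; the
# keyword overrides shown are arbitrary demo values:
if __name__ == "__main__":
    config = ConvBertConfig(num_hidden_layers=2, head_ratio=2, conv_kernel_size=9)
    print(config.model_type, config.hidden_size)  # "convbert" 768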
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon: the longest
    side must be strictly shorter than the sum of all the other sides.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } UpperCamelCase = { '''yjernite/retribert-base-uncased''': 512, } UpperCamelCase = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[str] = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[Any] = PRETRAINED_INIT_CONFIGURATION UpperCamelCase_ : int = RetriBertTokenizer UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : int="[UNK]" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE_ : int="[PAD]" , SCREAMING_SNAKE_CASE_ : Dict="[CLS]" , SCREAMING_SNAKE_CASE_ : int="[MASK]" , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : List[str]=None , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Dict: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars ): A: List[Any] = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop('''type''' ) ) A: List[Any] = do_lower_case A: int = strip_accents A: Any = tokenize_chinese_chars A: List[Any] = normalizer_class(**SCREAMING_SNAKE_CASE_ ) A: List[str] = do_lower_case def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> Any: '''simple docstring''' A: str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: int = [self.sep_token_id] A: str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , 
SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A: Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ )
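# --- Added usage sketch: loading the fast tokenizer defined above from the
# Hub and checking the special tokens it adds (a network download is assumed;
# in the released library the class is exported as `RetriBertTokenizerFast`):
if __name__ == "__main__":
    from transformers import RetriBertTokenizerFast

    tok = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
    ids = tok("passage retrieval with RetriBERT").input_ids
    print(ids[0] == tok.cls_token_id, ids[-1] == tok.sep_token_id)  # True True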
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
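# --- Added usage sketch: with the custom checker installed above, a doctest
# can opt out of output comparison via the registered flag (illustrative
# helper, not part of the original conftest):
def _current_time() -> str:
    """
    >>> _current_time()  # doctest: +IGNORE_RESULT
    '12:00:00'
    """
    import time

    return time.strftime("%H:%M:%S")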
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
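# --- Added usage sketch: the same backend switch works outside the tests for
# any `map_nested` call (a running Spark session plus the `joblibspark`
# package are assumed):
def _square(x):  # module-level so it is picklable
    return x * x


def _spark_map_example():
    with parallel_backend("spark"):
        # -> {"a": [1, 4], "b": [9, 16]}
        return map_nested(_square, {"a": [1, 2], "b": [3, 4]}, num_proc=2)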
'''simple docstring''' import heapq import sys import numpy as np UpperCamelCase = tuple[int, int] class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> str: '''simple docstring''' A: Any = [] A: int = set() def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' return len(self.elements ) == 0 def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(SCREAMING_SNAKE_CASE_ ) else: # update # print("update", item) A: Optional[int] = [] ((A) , (A)): str = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((A) , (A)): int = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' if item in self.set: self.set.remove(SCREAMING_SNAKE_CASE_ ) A: str = [] ((A) , (A)): List[str] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((A) , (A)): Any = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return self.elements[0][1] def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' ((A) , (A)): Dict = heapq.heappop(self.elements ) self.set.remove(SCREAMING_SNAKE_CASE_ ) return (priority, item) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: # euclidean distance A: List[str] = np.array(__lowercase ) A: Optional[int] = np.array(__lowercase ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: # integer division by time variable return consistent_heuristic(__lowercase , __lowercase ) // t def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]: A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase ) return ans def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]: A: Union[str, Any] = np.chararray((n, n) ) for i in range(__lowercase ): for j in range(__lowercase ): A: Union[str, Any] = '''*''' for i in range(__lowercase ): for j in range(__lowercase ): if (j, (n - 1) - i) in blocks: A: Optional[Any] = '''#''' A: Tuple = '''-''' A: List[str] = back_pointer[goal] while x != start: ((A) , (A)): Tuple = x # print(x) A: List[str] = '''-''' A: str = back_pointer[x] A: Dict = '''-''' for i in range(__lowercase ): for j in range(__lowercase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A: List[str] = back_pointer[goal] while x != start: print(__lowercase , end=''' ''' ) A: Optional[int] = back_pointer[x] print(__lowercase ) sys.exit() def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: if p[0] < 0 or p[0] > n - 1: return 
False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]: for itera in range(__lowercase ): open_list[itera].remove_element(__lowercase ) # print("s", s) # print("j", j) ((A) , (A)): Tuple = s A: Optional[Any] = (x - 1, y) A: str = (x + 1, y) A: List[Any] = (x, y + 1) A: int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__lowercase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__lowercase ) A: int = -1 A: int = float('''inf''' ) if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1: A: List[str] = g_function[s] + 1 A: List[str] = s if neighbours not in close_list_anchor: open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) ) if neighbours not in close_list_inad: for var in range(1 , __lowercase ): if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key( __lowercase , 0 , __lowercase , __lowercase ): open_list[j].put( __lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( ) -> Tuple: A: str = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase = make_common_ground() UpperCamelCase = blocks_blk # hyper parameters UpperCamelCase = 1 UpperCamelCase = 1 UpperCamelCase = 20 UpperCamelCase = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase = (0, 0) UpperCamelCase = (n - 1, n - 1) UpperCamelCase = 1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: int = {start: 0, goal: float('''inf''' )} A: Union[str, Any] = {start: -1, goal: -1} A: List[Any] = [] A: Union[str, Any] = set() for i in range(__lowercase ): open_list.append(PriorityQueue() ) open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) A: list[int] = [] A: list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __lowercase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A , A: Union[str, Any] = open_list[i].top_show() visited.add(__lowercase ) expand_state( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) close_list_inad.append(__lowercase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A: Union[str, Any] = open_list[0].top_show() visited.add(__lowercase ) expand_state( __lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase 
, __lowercase , ) close_list_anchor.append(__lowercase ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__lowercase ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
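# --- Added usage sketch: the custom priority queue in this file supports
# re-keying an element by calling `put` again (plain `heapq` has no
# decrease-key), which the search loop above relies on. The names used here
# (`PriorityQueue`, `put`, `top_show`) are the ones the search code itself
# calls, even though the class listing above garbles its method names:
def _priority_queue_demo():
    pq = PriorityQueue()
    pq.put((0, 0), 5.0)
    pq.put((1, 1), 3.0)
    pq.put((0, 0), 1.0)  # re-key (0, 0) from 5.0 down to 1.0
    print(pq.top_show())  # (0, 0) is now at the front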
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
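# --- Added cross-check sketch (networkx is an extra dependency, not used by
# the original script): the articulation points printed above can be
# validated against networkx's implementation:
def _networkx_cross_check(adjacency: dict) -> list:
    import networkx as nx

    graph = nx.Graph([(u, v) for u, neighbors in adjacency.items() for v in neighbors])
    return sorted(nx.articulation_points(graph))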
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d up to `digit` for which `numerator`/d has the
    longest recurring cycle in its decimal fraction part (Project Euler 26)."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # The remainder repeats: the cycle length is the number of
                # distinct remainders seen so far.
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
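# --- Added worked example: among denominators below 10, 1/7 = 0.(142857) has
# the longest repetend (six digits), so `solution(1, 10)` returns 7; the
# default limit of 1000 yields 983, the known Project Euler 26 answer (that
# full run is noticeably slower):
if __name__ == "__main__":
    assert solution(1, 10) == 7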
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
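# --- Added usage sketch: the round trip the tests above exercise, run outside
# the test harness (needs the `phonemizer` package and a checkpoint download):
if __name__ == "__main__":
    from transformers import Wav2Vec2PhonemeCTCTokenizer

    tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
    ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
    print(tokenizer.decode(ids))  # "h ə l oʊ h aʊ ɑːɹ j uː" per the tests above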
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
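# --- Added usage sketch: the main entry point this package exposes is pairing
# any vision encoder with any text decoder (checkpoint downloads assumed):
if __name__ == "__main__":
    from transformers import VisionEncoderDecoderModel

    model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        "google/vit-base-patch16-224-in21k", "bert-base-uncased"
    )
    print(type(model).__name__)  # VisionEncoderDecoderModel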
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = XLMRobertaTokenizer UpperCamelCase_ : Optional[int] = XLMRobertaTokenizerFast UpperCamelCase_ : List[str] = True UpperCamelCase_ : str = True def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A: List[str] = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: List[str] = '''<pad>''' A: Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> Optional[int]: '''simple docstring''' A: Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 10_02 ) def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' A: Union[str, Any] = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A: str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A: List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) A: int = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + 
'''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def _snake_case ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A: str = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A: Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: str = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: int = tempfile.mkdtemp() A: Optional[int] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Any = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) A: Dict = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way A: Tuple = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) A: int = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=True A: Tuple = tempfile.mkdtemp() A: Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way A: int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=False A: List[Any] = tempfile.mkdtemp() A: str = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) A: str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A: str = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) A: int = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) @cached_property def _snake_case ( self : str ) -> int: '''simple docstring''' return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name ) A: 
Tuple = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ ) A: str = pickle.dumps(SCREAMING_SNAKE_CASE_ ) pickle.loads(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A: Optional[int] = self.get_tokenizer() A: Union[str, Any] = self.get_rust_tokenizer() A: Dict = '''I was born in 92000, and this is falsé.''' A: Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: int = self.get_rust_tokenizer() A: Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _snake_case ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: Optional[int] = '''Hello World!''' A: Tuple = [0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def _snake_case ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Tuple = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A: Optional[Any] = [ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def _snake_case ( self : Optional[int] ) -> int: '''simple docstring''' A: int = {'''input_ids''': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 
5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
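# --- Illustrative usage (new code, not part of the test suite above) ---
# A minimal sketch of the slow/fast parity these tests assert; downloading
# the 'xlm-roberta-base' checkpoint is assumed to be possible.
from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

slow_tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast_tok = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")

sample = "I was born in 92000, and this is falsé."
assert slow_tok.tokenize(sample) == fast_tok.tokenize(sample)
assert slow_tok.encode(sample) == fast_tok.encode(sample)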
'''Small CLI that computes ROUGE between a predictions file and a reference file.'''
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
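# --- Illustrative usage (file names below are placeholders, not from the source) ---
# Direct call, equivalent to what the fire CLI wraps:
#   metrics = calculate_rouge_path("predictions.txt", "references.txt", save_path="metrics.json")
# Shell invocation, assuming the module is saved as rouge_cli.py:
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json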
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCamelCase = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def SCREAMING_SNAKE_CASE( __lowercase ) -> str: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: return max(metric_fn(__lowercase , __lowercase ) for gt in ground_truths ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> List[Any]: A: List[Any] = [line.strip() for line in open(__lowercase , '''r''' ).readlines()] A: Any = [] if args.gold_data_mode == "qa": A: int = pd.read_csv(__lowercase , sep='''\t''' , header=__lowercase ) for answer_list in data[1]: A: Optional[Any] = ast.literal_eval(__lowercase ) answers.append(__lowercase ) else: A: Tuple = [line.strip() for line in open(__lowercase , '''r''' ).readlines()] A: List[str] = [[reference] for reference in references] A: List[Any] = 0 for prediction, ground_truths in zip(__lowercase , __lowercase ): total += 1 em += metric_max_over_ground_truths(__lowercase , __lowercase , __lowercase ) fa += metric_max_over_ground_truths(__lowercase , __lowercase , __lowercase ) A: Optional[Any] = 1_0_0.0 * em / total A: List[str] = 1_0_0.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> str: A: Tuple = args.k A: int = [line.strip() for line in open(__lowercase , '''r''' ).readlines()] A: List[Any] = [line.strip() for line in open(__lowercase , '''r''' ).readlines()] A: Dict = 0 for hypo, reference in zip(__lowercase , __lowercase ): A: List[str] = set(hypo.split('''\t''' )[:k] ) A: Dict = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k A: Any = 1_0_0.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Union[str, Any]: def strip_title(__lowercase ): if title.startswith('''"''' ): A: Dict = title[1:] if title.endswith('''"''' ): A: str = title[:-1] return title A: str = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __lowercase , return_tensors='''pt''' , padding=__lowercase , truncation=__lowercase , )['''input_ids'''].to(args.device ) A: List[Any] = rag_model.rag.question_encoder(__lowercase ) A: List[str] = question_enc_outputs[0] A: Optional[int] = rag_model.retriever( __lowercase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , ) A: Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) A: Tuple = [] for docs in all_docs: A: Tuple = [strip_title(__lowercase ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(__lowercase ) ) return provenance_strings def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[Any]: with 
torch.no_grad(): A: Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __lowercase , return_tensors='''pt''' , padding=__lowercase , truncation=__lowercase ) A: Optional[int] = inputs_dict.input_ids.to(args.device ) A: Any = inputs_dict.attention_mask.to(args.device ) A: int = rag_model.generate( # rag_model overwrites generate __lowercase , attention_mask=__lowercase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__lowercase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) A: List[str] = rag_model.retriever.generator_tokenizer.batch_decode(__lowercase , skip_special_tokens=__lowercase ) if args.print_predictions: for q, a in zip(__lowercase , __lowercase ): logger.info('''Q: {} - A: {}'''.format(__lowercase , __lowercase ) ) return answers def SCREAMING_SNAKE_CASE( ) -> int: A: Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=__lowercase , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=__lowercase , choices=['''exact''', '''compressed''', '''legacy'''] , type=__lowercase , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=__lowercase , type=__lowercase , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=__lowercase , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=__lowercase , type=__lowercase , required=__lowercase , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=__lowercase , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=__lowercase , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=__lowercase , type=__lowercase , required=__lowercase , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=__lowercase , type=__lowercase , required=__lowercase , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=__lowercase , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=__lowercase , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=__lowercase , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=__lowercase , 
help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=__lowercase , help='''Min length of the generated answers''' ) parser.add_argument('''--max_length''' , default=5_0 , type=__lowercase , help='''Max length of the generated answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) A: Optional[int] = parser.parse_args() A: Dict = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Union[str, Any] = {} if args.model_type is None: A: Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): A: Optional[int] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration A: Optional[Any] = args.n_docs if args.index_name is not None: A: Optional[int] = args.index_name if args.index_path is not None: A: Tuple = args.index_path else: A: int = BartForConditionalGeneration A: List[Any] = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , __lowercase ) A: Optional[Any] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k A: Union[str, Any] = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(__lowercase , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(__lowercase ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): A: Dict = RagRetriever.from_pretrained(__lowercase , **__lowercase ) A: Any = model_class.from_pretrained(__lowercase , retriever=__lowercase , **__lowercase ) model.retriever.init_retrieval() else: A: Optional[int] = model_class.from_pretrained(__lowercase , **__lowercase ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: A: Dict = [] for line in tqdm(__lowercase ): questions.append(line.strip() ) if len(__lowercase ) == args.eval_batch_size: A: str = evaluate_batch_fn(__lowercase , __lowercase , __lowercase ) preds_file.write('''\n'''.join(__lowercase ) + '''\n''' ) preds_file.flush() A: Any = [] if len(__lowercase ) > 0: A: Tuple = evaluate_batch_fn(__lowercase , __lowercase , __lowercase ) preds_file.write('''\n'''.join(__lowercase ) ) preds_file.flush() score_fn(__lowercase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCamelCase = get_args() main(args)
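# --- Illustrative invocation (new; the paths are placeholders and the script
# file name is an assumption, while facebook/rag-token-nq is a real hub id) ---
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold.tsv \
#       --predictions_path predictions.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5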
'''Recursive bubble sort: each pass bubbles the largest remaining element to
the end of the unsorted prefix, then recurses on a prefix one shorter.'''


def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
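# --- Illustrative usage of the recursive bubble sort above (new code) ---
# A quick sanity check against Python's built-in sort:
example = [5, 1, 4, 2, 8, 0, 2]
assert bubble_sort(list(example)) == sorted(example)
print(bubble_sort(example))  # [0, 1, 2, 2, 4, 5, 8]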
'''simple docstring''' from __future__ import annotations UpperCamelCase = tuple[int, int, int] UpperCamelCase = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase UpperCamelCase = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' # -------------------------- default selection -------------------------- # rotors -------------------------- UpperCamelCase = '''EGZWVONAHDCLFQMSIPJBYUKXTR''' UpperCamelCase = '''FOBHMDKEXQNRAULPGSJVTYICZW''' UpperCamelCase = '''ZJXESIUQLHAVRMDOYGTNFWPBKC''' # reflector -------------------------- UpperCamelCase = { '''A''': '''N''', '''N''': '''A''', '''B''': '''O''', '''O''': '''B''', '''C''': '''P''', '''P''': '''C''', '''D''': '''Q''', '''Q''': '''D''', '''E''': '''R''', '''R''': '''E''', '''F''': '''S''', '''S''': '''F''', '''G''': '''T''', '''T''': '''G''', '''H''': '''U''', '''U''': '''H''', '''I''': '''V''', '''V''': '''I''', '''J''': '''W''', '''W''': '''J''', '''K''': '''X''', '''X''': '''K''', '''L''': '''Y''', '''Y''': '''L''', '''M''': '''Z''', '''Z''': '''M''', } # -------------------------- extra rotors -------------------------- UpperCamelCase = '''RMDJXFUWGISLHVTCQNKYPBEZOA''' UpperCamelCase = '''SGLCPQWZHKXAREONTFBVIYJUDM''' UpperCamelCase = '''HVSICLTYKQUBXDWAJZOMFGPREN''' UpperCamelCase = '''RZWQHFMVDBKICJLNTUXAGYPSOE''' UpperCamelCase = '''LFKIJODBEGAMQPXVUHYSTCZRWN''' UpperCamelCase = '''KOAEGVDHXPQZMLFTYWJNBRCIUS''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]: # Checks if there are 3 unique rotors if (unique_rotsel := len(set(__lowercase ) )) < 3: A: str = F"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(__lowercase ) # Checks if rotor positions are valid A , A , A: Union[str, Any] = rotpos if not 0 < rotorposa <= len(__lowercase ): A: Optional[Any] = F"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(__lowercase ) if not 0 < rotorposa <= len(__lowercase ): A: List[str] = F"""Second rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(__lowercase ) if not 0 < rotorposa <= len(__lowercase ): A: Any = F"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(__lowercase ) # Validates string and returns dict A: Optional[Any] = _plugboard(__lowercase ) return rotpos, rotsel, pbdict def SCREAMING_SNAKE_CASE( __lowercase ) -> dict[str, str]: # tests the input string if it # a) is type string # b) has even length (so pairs can be made) if not isinstance(__lowercase , __lowercase ): A: Optional[int] = F"""Plugboard setting isn't type string ({type(__lowercase )})""" raise TypeError(__lowercase ) elif len(__lowercase ) % 2 != 0: A: Dict = F"""Odd number of symbols ({len(__lowercase )})""" raise Exception(__lowercase ) elif pbstring == "": return {} pbstring.replace(''' ''' , '''''' ) # Checks if all characters are unique A: Dict = set() for i in pbstring: if i not in abc: A: Optional[Any] = F"""'{i}' not in list of symbols""" raise Exception(__lowercase ) elif i in tmppbl: A: Dict = F"""Duplicate symbol ({i})""" raise Exception(__lowercase ) else: tmppbl.add(__lowercase ) del tmppbl # Created the dictionary A: List[str] = {} for j in range(0 , len(__lowercase ) - 1 , 2 ): A: Tuple = pbstring[j + 1] A: List[Any] = pbstring[j] return pb def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase = (rotora, rotora, rotora) , __lowercase = "" , ) -> str: A: List[str] = text.upper() A , A , A: Dict = _validator( __lowercase , 
__lowercase , plugb.upper() ) A , A , A: str = rotor_position A , A , A: Optional[int] = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 A: Dict = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: A: Tuple = plugboard[symbol] # rotor ra -------------------------- A: Optional[Any] = abc.index(__lowercase ) + rotorposa A: str = rotora[index % len(__lowercase )] # rotor rb -------------------------- A: List[Any] = abc.index(__lowercase ) + rotorposa A: Tuple = rotora[index % len(__lowercase )] # rotor rc -------------------------- A: List[str] = abc.index(__lowercase ) + rotorposa A: int = rotora[index % len(__lowercase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher A: Union[str, Any] = reflector[symbol] # 2nd rotors A: Optional[Any] = abc[rotora.index(__lowercase ) - rotorposa] A: List[str] = abc[rotora.index(__lowercase ) - rotorposa] A: List[str] = abc[rotora.index(__lowercase ) - rotorposa] # 2nd plugboard if symbol in plugboard: A: int = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(__lowercase ): A: Dict = 0 rotorposa += 1 if rotorposa >= len(__lowercase ): A: Tuple = 0 rotorposa += 1 if rotorposa >= len(__lowercase ): A: str = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(__lowercase ) return "".join(__lowercase ) if __name__ == "__main__": UpperCamelCase = '''This is my Python script that emulates the Enigma machine from WWII.''' UpperCamelCase = (1, 1, 1) UpperCamelCase = '''pictures''' UpperCamelCase = (rotora, rotora, rotora) UpperCamelCase = enigma(message, rotor_pos, rotor_sel, pb) print('''Encrypted message:''', en) print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
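# --- Illustrative round-trip check (new code; reuses the `enigma` entry point
# and the 'pictures' plugboard from the demo block above, with the module's
# three default rotor strings so the three-unique-rotors validation passes) ---
# The machine is self-reciprocal: encrypting the ciphertext with identical
# rotor positions, rotor selection and plugboard recovers the plaintext.
rotor_sel = (
    "EGZWVONAHDCLFQMSIPJBYUKXTR",
    "FOBHMDKEXQNRAULPGSJVTYICZW",
    "ZJXESIUQLHAVRMDOYGTNFWPBKC",
)
plain = "A SECOND MESSAGE"
cipher = enigma(plain, (1, 1, 1), rotor_sel, "pictures")
assert enigma(cipher, (1, 1, 1), rotor_sel, "pictures") == plain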
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: List[Any] = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]: A: Tuple = OrderedDict() A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A: Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: int = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[int] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 5_1_2} A: List[str] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: List[str] = {'''visual_embedding_dim''': 2_0_4_8} A: Optional[int] = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: A: Optional[int] = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: str = '''nlvr''' A: Union[str, Any] = VisualBertConfig(**__lowercase ) # Load State Dict A: Union[str, Any] = load_state_dict(__lowercase ) A: str = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: Optional[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Any = VisualBertForMultipleChoice(__lowercase ) 
model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
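# --- Illustrative invocation (new; paths and the script file name are
# placeholders, and the checkpoint file name must be one of
# ACCEPTABLE_CHECKPOINTS as asserted above) ---
#   python convert_visual_bert_checkpoint.py \
#       checkpoints/vqa_fine_tuned.th ./visual_bert_vqa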
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''', }, '''merges_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''', '''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''', '''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''', '''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''', '''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''', }, } UpperCamelCase = { '''gpt2''': 1024, '''gpt2-medium''': 1024, '''gpt2-large''': 1024, '''gpt2-xl''': 1024, '''distilgpt2''': 1024, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Any = VOCAB_FILES_NAMES UpperCamelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Union[str, Any] = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : Optional[int] = GPTaTokenizer def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : str="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<|endoftext|>" , SCREAMING_SNAKE_CASE_ : Optional[int]=False , **SCREAMING_SNAKE_CASE_ : List[Any] , ) -> Optional[int]: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Optional[int] = kwargs.pop('''add_bos_token''' , SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: A: Any = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('''type''' ) ) A: Tuple = add_prefix_space A: List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) A: Tuple = add_prefix_space def _snake_case ( self : 
Optional[Any] , *SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> BatchEncoding: '''simple docstring''' A: str = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ) -> BatchEncoding: '''simple docstring''' A: str = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A: List[str] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : "Conversation" ) -> List[int]: '''simple docstring''' A: List[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [self.eos_token_id] ) if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length: A: Union[str, Any] = input_ids[-self.model_max_length :] return input_ids
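# --- Illustrative usage (new code, using the stock GPT2TokenizerFast this
# module models; downloading the 'gpt2' checkpoint is assumed) ---
# The guards above require add_prefix_space=True before pre-tokenized
# (is_split_into_words=True) input can be encoded:
from transformers import GPT2TokenizerFast

tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(enc.input_ids)  # byte-level BPE ids, each word encoded with a leading space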
'''Project Euler problem 43: sum all 0-9 pandigital numbers whose three-digit
substrings satisfy the stated divisibility properties.'''
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    # d2d3d4 divisible by 2, d3d4d5 by 3, d4d5d6 by 5
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
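# --- Illustrative note (new, not from the source) ---
# print(solution())  # brute-forces all 10! digit permutations; the published
# Project Euler 43 result is 16695334890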
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[str] = ["""image_processor""", """tokenizer"""] UpperCamelCase_ : Union[str, Any] = """ViltImageProcessor""" UpperCamelCase_ : Any = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[int]: '''simple docstring''' A: str = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) A: Dict = kwargs.pop('''feature_extractor''' ) A: Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: int = self.image_processor def __call__( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = False , SCREAMING_SNAKE_CASE_ : Union[bool, str, TruncationStrategy] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : int = 0 , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE_ : int , ) -> BatchEncoding: '''simple docstring''' A: Optional[Any] = self.tokenizer( text=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , return_overflowing_tokens=SCREAMING_SNAKE_CASE_ , return_special_tokens_mask=SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , return_length=SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) # add pixel_values + pixel_mask A: Optional[int] = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ ) encoding.update(SCREAMING_SNAKE_CASE_ ) return encoding def _snake_case ( self : Optional[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> List[Any]: '''simple docstring''' return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , 
**SCREAMING_SNAKE_CASE_ ) @property def _snake_case ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: List[str] = self.tokenizer.model_input_names A: Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _snake_case ( self : Any ) -> Union[str, Any]: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor_class @property def _snake_case ( self : Optional[int] ) -> Any: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE_ , ) return self.image_processor
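# --- Illustrative usage (new code; the checkpoint name and the image URL are
# assumptions for illustration, and network access is required for both) ---
import requests
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(image, "How many cats are there?", return_tensors="pt")
print(list(inputs.keys()))  # text encodings plus pixel_values / pixel_mask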
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCamelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCamelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE( ) -> Dict: A: Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) A: Union[str, Any] = bs[:] A: List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 A: List[Any] = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[Any] = set() A: Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A: List[Any] = char return pairs class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : 
Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: str = json.load(SCREAMING_SNAKE_CASE_ ) A: str = {v: k for k, v in self.encoder.items()} A: Union[str, Any] = errors # how to handle errors in decoding A: Optional[int] = bytes_to_unicode() A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: A: int = merges_handle.read().split('''\n''' )[1:-1] A: str = [tuple(merge.split() ) for merge in bpe_merges] A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = {} A: Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] A: str = tuple(SCREAMING_SNAKE_CASE_ ) A: str = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break A , A: Optional[Any] = bigram A: Tuple = [] A: List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: A: 
Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A: int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ ) A: Any = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: str = word return word def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): A: Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ ) A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) A: int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) A: Any = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) A: Union[str, Any] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: int = [self.cls_token_id] A: str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , 
SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Dict = [self.sep_token_id] A: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): A: List[Any] = ''' ''' + text return (text, kwargs)
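# --- Illustrative usage (new code; downloading the checkpoint is assumed) ---
# Byte-level BPE is lossless, so an encode/decode round trip is exact:
from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok.encode("Hello world!")  # wrapped as <s> ... </s> by the method above
assert tok.decode(ids, skip_special_tokens=True) == "Hello world!"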
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets UpperCamelCase = '''\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } ''' UpperCamelCase = '''\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. ''' UpperCamelCase = ''' Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for \'cvit-mkb-clsr\' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: "accuracy": Accuracy "f1": F1 score "precision": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0, \'f1\': 1.0} >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {\'precision@10\': 1.0} ''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: return float((preds == labels).mean() ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: A: Optional[int] = simple_accuracy(__lowercase , __lowercase ) A: str = float(fa_score(y_true=__lowercase , y_pred=__lowercase ) ) return { "accuracy": acc, "f1": fa, } def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: A: Optional[int] = np.array(__lowercase ) A: Optional[Any] = np.array(__lowercase ) A: int = en_sentvecs.shape[0] # mean centering A: str = en_sentvecs - np.mean(__lowercase , axis=0 ) A: Tuple = in_sentvecs - np.mean(__lowercase , axis=0 ) A: str = cdist(__lowercase , __lowercase , '''cosine''' ) A: Union[str, Any] = np.array(range(__lowercase ) ) A: Any = sim.argsort(axis=1 )[:, :1_0] A: Dict = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _snake_case ( self : Optional[int] ) -> int: '''simple docstring''' if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' 
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: '''simple docstring''' if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
'''Return the largest number obtainable by deleting exactly one digit from the
absolute value of the input.'''


def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
'''Project Euler problem 135: count the values of n below one million for which
x^2 - y^2 - z^2 = n (with x, y, z in arithmetic progression) has exactly ten
distinct solutions.'''


def solution(limit: int = 1000000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            common_difference /= 4
            if (
                first_term > common_difference
                and first_term < 4 * common_difference
            ):
                # since x, y, z are positive integers: z > 0 and a > d, also 4d < a
                frequency[n] += 1

    return sum(1 for x in frequency[1:limit] if x == 10)


if __name__ == "__main__":
    print(f"{solution() = }")
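# --- Illustrative sanity check (new, drawn from the problem statement, which
# gives n = 1155 as the least value with exactly ten solutions) ---
# assert solution(1200) >= 1  # 1155 should be counted among n <= 1200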
'''simple docstring''' from __future__ import annotations import math def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) A: str = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]: if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) A: Union[str, Any] = len(__lowercase ) A: str = matrix_length // 2 A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )] A: Optional[Any] = [ [a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase ) ] A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )] A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )] return top_left, top_right, bot_left, bot_right def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]: return len(__lowercase ), len(matrix[0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> None: print('''\n'''.join(str(__lowercase ) for line in matrix ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase ) == (2, 2): return default_matrix_multiplication(__lowercase , __lowercase ) A , A , A , A: Union[str, Any] = split_matrix(__lowercase ) A , A , A , A: List[Any] = split_matrix(__lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) A: Any = matrix_addition(__lowercase , __lowercase ) A: List[Any] = matrix_addition(__lowercase , __lowercase ) A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) # construct the new matrix from our 4 quadrants A: Union[str, Any] = [] for i in range(len(__lowercase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__lowercase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase )[1] != 
matrix_dimensions(__lowercase )[0]: A: int = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(__lowercase ) A: str = matrix_dimensions(__lowercase ) A: str = matrix_dimensions(__lowercase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] A: Union[str, Any] = max(*__lowercase , *__lowercase ) A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) ) A: List[Any] = matrixa A: Tuple = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) A: Any = actual_strassen(__lowercase , __lowercase ) # Removing the additional zeros for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": UpperCamelCase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
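A quick sanity check for the implementation above. This sketch assumes the public entry point is the `strassen` function exercised in the `__main__` block; like that block, it uses a non-square first operand, which goes through the pad-to-power-of-two path, and the expected value is just the ordinary matrix product:

# assumes `strassen` from the module above is in scope
a = [[1, 2, 3], [4, 5, 6]]          # 2x3
b = [[7, 8], [9, 10], [11, 12]]     # 3x2
# ordinary product: [[1*7 + 2*9 + 3*11, ...], [4*7 + 5*9 + 6*11, ...]]
print(strassen(a, b))  # expected: [[58, 64], [139, 154]]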
319
1
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: List[Any] = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]: A: Tuple = OrderedDict() A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A: Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: int = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[int] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 5_1_2} A: List[str] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: List[str] = {'''visual_embedding_dim''': 2_0_4_8} A: Optional[int] = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: A: Optional[int] = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: str = '''nlvr''' A: Union[str, Any] = VisualBertConfig(**__lowercase ) # Load State Dict A: Union[str, Any] = load_state_dict(__lowercase ) A: str = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: Optional[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Any = VisualBertForMultipleChoice(__lowercase ) 
model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
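A hedged invocation sketch for the script above. The filename is an assumption (it follows the usual transformers conversion-script naming); the two positional arguments and the requirement that the checkpoint's basename appear in ACCEPTABLE_CHECKPOINTS come from the argparse setup and the assert earlier in the file:

# python convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py \
#     /path/to/vqa_pre_trained.th \
#     ./visualbert-vqa-pre-trained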
319
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = 2 @register_to_config def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = sigma_max # setable values A: int = None A: np.IntTensor = None A: torch.FloatTensor = None # sigma(t_i) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' A: List[Any] = num_inference_steps A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) A: str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A: List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) A: Optional[Any] = sigma + gamma * sigma A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' A: Union[str, Any] = sample_hat + sigma_hat * model_output A: str = (sample_hat - pred_original_sample) / sigma_hat A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple 
docstring''' A: int = sample_prev + sigma_prev * model_output A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' raise NotImplementedError()
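In the upstream diffusers API the obfuscated methods above correspond, in order, to scale_model_input, set_timesteps, add_noise_to_input, step, and step_correct (the final stub is add_noise). A schematic, heavily hedged sketch of the predictor-corrector loop they support; `unet`, the model-input scaling, and the handling of the last (sigma_prev == 0) step are simplifications, not taken from this file:

# scheduler.set_timesteps(num_inference_steps)
# for sigma, sigma_prev in zip(scheduler.schedule[:-1], scheduler.schedule[1:]):
#     sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#     model_output = unet(sample_hat, sigma_hat).sample
#     out = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#     model_output = unet(out.prev_sample, sigma_prev).sample
#     out = scheduler.step_correct(
#         model_output, sigma_hat, sigma_prev, sample_hat,
#         out.prev_sample, out.derivative,
#     )
#     sample = out.prev_sample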
319
1
'''simple docstring''' import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
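The migration this shim warns about is a one-line import change; a hedged sketch (the checkpoint id and the `prompt`/`init_image`/`mask_image` variables are placeholders, not from this file):

# from diffusers import StableDiffusionInpaintPipeline
# pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
# image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]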
319
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) UpperCamelCase_ : Optional[str] = field( 
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' if self.train_file is not None: A: Tuple = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A: str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]: with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f: A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())] assert len(__lowercase ) == len(__lowercase ) A: Optional[int] = {c: dataset[c] for c in dataset.column_names} A: Union[str, Any] = refs return Dataset.from_dict(__lowercase ) def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: List[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A: Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A: int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) A: Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: A: Any = {} if data_args.train_file is not None: A: int = data_args.train_file if data_args.validation_file is not None: A: Optional[int] = data_args.validation_file A: List[str] = data_args.train_file.split('''.''' )[-1] if extension == "txt": A: int = '''text''' A: Any = load_dataset(__lowercase , data_files=__lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A: Dict = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase ) elif model_args.model_name_or_path: A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: A: str = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) A: Tuple = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase ) elif model_args.model_name_or_path: A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: A: List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase ) model.resize_token_embeddings(len(__lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: A: int = datasets['''train'''].column_names else: A: str = datasets['''validation'''].column_names A: Tuple = '''text''' if '''text''' in column_names else column_names[0] A: List[str] = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(__lowercase ): # Remove empty lines A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length ) A: str = datasets.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A: Dict = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A: List[Any] = False # Data collator # This one will take care of randomly masking the tokens. 
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A: Optional[int] = Trainer( model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: A: Optional[int] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A: str = model_args.model_name_or_path else: A: List[str] = None A: str = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation A: Optional[int] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A: Optional[Any] = trainer.evaluate() A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) A: Dict = perplexity A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
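A hedged command-line sketch for the training script above. Every flag shown is defined in the argument dataclasses or in TrainingArguments; the script name and file names are placeholders:

# python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file train.txt \
#     --train_ref_file train_ref.txt \
#     --validation_file dev.txt \
#     --do_train --do_eval \
#     --output_dir ./mlm-wwm-output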
319
1
'''simple docstring''' import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated UpperCamelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ UpperCamelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__lowercase )[0] @deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> str: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowercase ) as bytestream: A: Union[str, Any] = _readaa(__lowercase ) if magic != 2_0_5_1: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) A: Optional[int] = _readaa(__lowercase ) A: List[str] = _readaa(__lowercase ) A: Optional[int] = _readaa(__lowercase ) A: Any = bytestream.read(rows * cols * num_images ) A: List[str] = numpy.frombuffer(__lowercase , dtype=numpy.uinta ) A: int = data.reshape(__lowercase , __lowercase , __lowercase , 1 ) return data @deprecated(__lowercase , '''Please use tf.one_hot on tensors.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: A: int = labels_dense.shape[0] A: Any = numpy.arange(__lowercase ) * num_classes A: int = numpy.zeros((num_labels, num_classes) ) A: int = 1 return labels_one_hot @deprecated(__lowercase , '''Please use tf.data to implement this functionality.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=1_0 ) -> int: print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=__lowercase ) as bytestream: A: Union[str, Any] = _readaa(__lowercase ) if magic != 2_0_4_9: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) A: Dict = _readaa(__lowercase ) A: List[str] = bytestream.read(__lowercase ) A: List[Any] = numpy.frombuffer(__lowercase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__lowercase , __lowercase ) return labels class lowerCAmelCase_ : '''simple docstring''' @deprecated( SCREAMING_SNAKE_CASE_ , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Tuple=dtypes.floataa , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> Dict: '''simple docstring''' A , A: Union[str, Any] = random_seed.get_seed(SCREAMING_SNAKE_CASE_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) A: Tuple = dtypes.as_dtype(SCREAMING_SNAKE_CASE_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: A: int = 1_00_00 A: Tuple = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f"""images.shape: {images.shape} labels.shape: {labels.shape}""" A: int = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if 
reshape: assert images.shape[3] == 1 A: Optional[int] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. A: int = images.astype(numpy.floataa ) A: List[Any] = numpy.multiply(SCREAMING_SNAKE_CASE_ , 1.0 / 255.0 ) A: int = images A: List[Any] = labels A: List[str] = 0 A: List[str] = 0 @property def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return self._images @property def _snake_case ( self : Tuple ) -> Tuple: '''simple docstring''' return self._labels @property def _snake_case ( self : Optional[int] ) -> Any: '''simple docstring''' return self._num_examples @property def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' return self._epochs_completed def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=False , SCREAMING_SNAKE_CASE_ : List[str]=True ) -> Dict: '''simple docstring''' if fake_data: A: List[Any] = [1] * 7_84 A: Any = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(SCREAMING_SNAKE_CASE_ )], [fake_label for _ in range(SCREAMING_SNAKE_CASE_ )], ) A: str = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: A: Union[str, Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = self.images[perma] A: Any = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch A: Union[str, Any] = self._num_examples - start A: int = self._images[start : self._num_examples] A: Dict = self._labels[start : self._num_examples] # Shuffle the data if shuffle: A: Optional[Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(SCREAMING_SNAKE_CASE_ ) A: str = self.images[perm] A: Dict = self.labels[perm] # Start next epoch A: List[Any] = 0 A: List[str] = batch_size - rest_num_examples A: List[str] = self._index_in_epoch A: Tuple = self._images[start:end] A: Optional[Any] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size A: int = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__lowercase , '''Please write your own downloading logic.''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> str: if not gfile.Exists(__lowercase ): gfile.MakeDirs(__lowercase ) A: Tuple = os.path.join(__lowercase , __lowercase ) if not gfile.Exists(__lowercase ): urllib.request.urlretrieve(__lowercase , __lowercase ) # noqa: S310 with gfile.GFile(__lowercase ) as f: A: List[str] = f.size() print('''Successfully downloaded''' , __lowercase , __lowercase , '''bytes.''' ) return filepath @deprecated( __lowercase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=dtypes.floataa , __lowercase=True , __lowercase=5_0_0_0 , __lowercase=None , __lowercase=DEFAULT_SOURCE_URL , ) -> int: if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__lowercase , one_hot=__lowercase , dtype=__lowercase , seed=__lowercase ) A: int = fake() A: int = fake() A: Union[str, Any] = fake() return _Datasets(train=__lowercase , validation=__lowercase , 
test=__lowercase ) if not source_url: # empty string check A: Tuple = DEFAULT_SOURCE_URL A: Union[str, Any] = '''train-images-idx3-ubyte.gz''' A: List[Any] = '''train-labels-idx1-ubyte.gz''' A: Any = '''t10k-images-idx3-ubyte.gz''' A: Dict = '''t10k-labels-idx1-ubyte.gz''' A: Dict = _maybe_download( __lowercase , __lowercase , source_url + train_images_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Optional[int] = _extract_images(__lowercase ) A: Union[str, Any] = _maybe_download( __lowercase , __lowercase , source_url + train_labels_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Optional[Any] = _extract_labels(__lowercase , one_hot=__lowercase ) A: Any = _maybe_download( __lowercase , __lowercase , source_url + test_images_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Any = _extract_images(__lowercase ) A: List[Any] = _maybe_download( __lowercase , __lowercase , source_url + test_labels_file ) with gfile.Open(__lowercase , '''rb''' ) as f: A: Optional[int] = _extract_labels(__lowercase , one_hot=__lowercase ) if not 0 <= validation_size <= len(__lowercase ): A: Dict = ( '''Validation size should be between 0 and ''' F"""{len(__lowercase )}. Received: {validation_size}.""" ) raise ValueError(__lowercase ) A: Tuple = train_images[:validation_size] A: List[str] = train_labels[:validation_size] A: List[str] = train_images[validation_size:] A: Optional[Any] = train_labels[validation_size:] A: Union[str, Any] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed} A: int = _DataSet(__lowercase , __lowercase , **__lowercase ) A: Tuple = _DataSet(__lowercase , __lowercase , **__lowercase ) A: Union[str, Any] = _DataSet(__lowercase , __lowercase , **__lowercase ) return _Datasets(train=__lowercase , validation=__lowercase , test=__lowercase )
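A hedged usage sketch. In the original TensorFlow tutorial code the final function above is read_data_sets and the batch method is next_batch; under that assumption (the directory path is a placeholder):

# datasets = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5_000)
# images, labels = datasets.train.next_batch(100)  # images: (100, 784) with reshape=True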
319
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
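A sketch of the behaviour these tests pin down, using the public checkpoint they load (requires the phonemizer backend to be installed):

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
ids = tokenizer("Hello how are you", phonemizer_lang="en-us").input_ids
print(tokenizer.decode(ids))  # per the tests above: "h ə l oʊ h aʊ ɑːɹ j uː"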
319
1
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A: Any = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) A: Dict = get_activation('''gelu''' ) self.assertTrue(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE_ ) , torch_builtin(SCREAMING_SNAKE_CASE_ ) ) ) self.assertFalse(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE_ ) , gelu_new(SCREAMING_SNAKE_CASE_ ) ) ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[Any] = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) A: List[Any] = get_activation('''gelu''' ) A: Optional[int] = get_activation('''gelu_10''' ) A: Any = torch_builtin(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = geluaa(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(SCREAMING_SNAKE_CASE_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' get_activation('''gelu''' ) get_activation('''gelu_10''' ) get_activation('''gelu_fast''' ) get_activation('''gelu_new''' ) get_activation('''gelu_python''' ) get_activation('''gelu_pytorch_tanh''' ) get_activation('''linear''' ) get_activation('''mish''' ) get_activation('''quick_gelu''' ) get_activation('''relu''' ) get_activation('''sigmoid''' ) get_activation('''silu''' ) get_activation('''swish''' ) get_activation('''tanh''' ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): get_activation('''bogus''' ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): get_activation(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> int: '''simple docstring''' A: int = get_activation('''gelu''' ) A: Optional[Any] = 1 A: List[Any] = get_activation('''gelu''' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: Tuple = acta.a
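A minimal sketch of the API under test; get_activation looks the activation up by name, which is exactly what the first test compares against gelu_python:

import torch
from transformers.activations import get_activation

act = get_activation("gelu")
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # exact GELU: approx [-0.1587, 0.0000, 0.8413]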
319
'''simple docstring''' import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> None: '''simple docstring''' warnings.warn( '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use BeitImageProcessor instead.''' , SCREAMING_SNAKE_CASE_ , ) super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
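As the warning says, the fix is to switch to BeitImageProcessor; a hedged sketch (the checkpoint id and `image` are placeholders):

# from transformers import BeitImageProcessor
# processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
# inputs = processor(images=image, return_tensors="pt")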
319
1
'''simple docstring''' import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger() @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : List[nn.Module] = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : list = field(default_factory=UpperCAmelCase_ ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tensor , SCREAMING_SNAKE_CASE_ : Tensor ) -> List[str]: '''simple docstring''' A: Tuple = len(list(m.modules() ) ) == 1 or isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ) or isinstance(SCREAMING_SNAKE_CASE_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(SCREAMING_SNAKE_CASE_ ) def __call__( self : str , SCREAMING_SNAKE_CASE_ : Tensor ) -> List[str]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(SCREAMING_SNAKE_CASE_ ) [x.remove() for x in self.handles] return self @property def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' return list(filter(lambda SCREAMING_SNAKE_CASE_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : nn.Module UpperCamelCase_ : nn.Module UpperCamelCase_ : int = 1 UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : List = field(default_factory=UpperCAmelCase_ ) UpperCamelCase_ : bool = True def __call__( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Optional[Any]: '''simple docstring''' A: str = Tracker(self.dest )(SCREAMING_SNAKE_CASE_ ).parametrized A: List[str] = Tracker(self.src )(SCREAMING_SNAKE_CASE_ ).parametrized A: str = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.src_skip , SCREAMING_SNAKE_CASE_ ) ) A: int = list(filter(lambda SCREAMING_SNAKE_CASE_ : type(SCREAMING_SNAKE_CASE_ ) not in self.dest_skip , SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. 
Source module has {len(SCREAMING_SNAKE_CASE_ )} operations while""" f""" destination module has {len(SCREAMING_SNAKE_CASE_ )}.""" ) for dest_m, src_m in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"""Transfered from={src_m} to={dest_m}""" ) class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : nn.Module ) -> List[Any]: '''simple docstring''' super().__init__() A: List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(('''conv1''', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block''' ), f"""Unexpected layer name {k}""" A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) + 1 feature_blocks.append((f"""res{block_index}""", v) ) A: Optional[Any] = nn.ModuleDict(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tensor ) -> Any: '''simple docstring''' return get_trunk_forward_outputs( SCREAMING_SNAKE_CASE_ , out_feat_keys=SCREAMING_SNAKE_CASE_ , feature_blocks=self._feature_blocks , ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> str: '''simple docstring''' A: Optional[int] = x.split('''-''' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE_ : str ) -> Callable[[], Tuple[nn.Module, Dict]]: '''simple docstring''' if x not in self: A: int = self.convert_name_to_timm(SCREAMING_SNAKE_CASE_ ) A: int = partial(lambda: (timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ ).eval(), None) ) else: A: Dict = super().__getitem__(SCREAMING_SNAKE_CASE_ ) return val class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __getitem__( self : Tuple , SCREAMING_SNAKE_CASE_ : str ) -> Callable[[], nn.Module]: '''simple docstring''' if "seer" in x and "in1k" not in x: A: Tuple = RegNetModel else: A: str = RegNetForImageClassification return val def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Dict: for from_key, to_key in keys: A: Any = from_state_dict[from_key].clone() print(F"""Copied key={from_key} to={to_key}""" ) return to_state_dict def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> List[str]: print(F"""Converting {name}...""" ) with torch.no_grad(): A , A: Any = from_model_func() A: Union[str, Any] = our_model_func(__lowercase ).eval() A: str = ModuleTransfer(src=__lowercase , dest=__lowercase , raise_if_mismatch=__lowercase ) A: Tuple = torch.randn((1, 3, 2_2_4, 2_2_4) ) module_transfer(__lowercase ) if from_state_dict is not None: A: str = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: A: List[Any] = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')] A: Dict = manually_copy_vissl_head(__lowercase , our_model.state_dict() , __lowercase ) our_model.load_state_dict(__lowercase ) A: Optional[int] = our_model(__lowercase , output_hidden_states=__lowercase ) A: Any = ( our_outputs.logits if isinstance(__lowercase , __lowercase ) else our_outputs.last_hidden_state ) A: Optional[Any] = from_model(__lowercase ) A: Optional[int] = from_output[-1] if type(__lowercase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't 
actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: A: Any = our_outputs.hidden_states[-1] assert torch.allclose(__lowercase , __lowercase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=__lowercase , ) A: Dict = 2_2_4 if '''seer''' not in name else 3_8_4 # we can use the convnext one A: Dict = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=__lowercase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=__lowercase , ) print(F"""Pushed {name}""" ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = None , __lowercase = True ) -> Union[str, Any]: A: Any = '''imagenet-1k-id2label.json''' A: List[str] = 1_0_0_0 A: Optional[Any] = (1, num_labels) A: List[Any] = '''huggingface/label-files''' A: str = num_labels A: Optional[int] = json.load(open(cached_download(hf_hub_url(__lowercase , __lowercase , repo_type='''dataset''' ) ) , '''r''' ) ) A: Dict = {int(__lowercase ): v for k, v in idalabel.items()} A: Tuple = idalabel A: Optional[Any] = {v: k for k, v in idalabel.items()} A: List[str] = partial(__lowercase , num_labels=__lowercase , idalabel=__lowercase , labelaid=__lowercase ) A: Union[str, Any] = { '''regnet-x-002''': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type='''x''' ), '''regnet-x-004''': ImageNetPreTrainedConfig( depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type='''x''' ), '''regnet-x-006''': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type='''x''' ), '''regnet-x-008''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type='''x''' ), '''regnet-x-016''': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type='''x''' ), '''regnet-x-032''': ImageNetPreTrainedConfig( depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type='''x''' ), '''regnet-x-040''': ImageNetPreTrainedConfig( depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type='''x''' ), '''regnet-x-064''': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type='''x''' ), '''regnet-x-080''': ImageNetPreTrainedConfig( depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type='''x''' ), '''regnet-x-120''': ImageNetPreTrainedConfig( depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type='''x''' ), '''regnet-x-160''': ImageNetPreTrainedConfig( depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type='''x''' ), '''regnet-x-320''': ImageNetPreTrainedConfig( depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type='''x''' ), # y variant '''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ), '''regnet-y-004''': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ), '''regnet-y-006''': 
ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ), '''regnet-y-008''': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ), '''regnet-y-016''': ImageNetPreTrainedConfig( depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ), '''regnet-y-032''': ImageNetPreTrainedConfig( depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ), '''regnet-y-040''': ImageNetPreTrainedConfig( depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ), '''regnet-y-064''': ImageNetPreTrainedConfig( depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ), '''regnet-y-080''': ImageNetPreTrainedConfig( depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ), '''regnet-y-120''': ImageNetPreTrainedConfig( depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ), '''regnet-y-160''': ImageNetPreTrainedConfig( depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ), '''regnet-y-320''': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 '''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ), '''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ), '''regnet-y-1280-seer''': RegNetConfig( depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ), '''regnet-y-2560-seer''': RegNetConfig( depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ), '''regnet-y-10b-seer''': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ), # finetuned on imagenet '''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ), '''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ), '''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ), '''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig( depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ), '''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ), } A: Dict = NameToOurModelFuncMap() A: Union[str, Any] = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(__lowercase , __lowercase ) -> Tuple[nn.Module, Dict]: A: Any = torch.hub.load_state_dict_from_url(__lowercase , model_dir=str(__lowercase ) , map_location='''cpu''' ) A: Any = model_func() # check if we have a head, if yes add it A: Optional[Any] = files['''classy_state_dict''']['''base_model''']['''model'''] A: List[Any] = model_state_dict['''trunk'''] model.load_state_dict(__lowercase ) return model.eval(), model_state_dict["heads"] # pretrained A: str = partial( __lowercase , 
'''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A: List[str] = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A: Optional[int] = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) A: str = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) # IN1K finetuned A: Optional[Any] = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A: Union[str, Any] = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A: int = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) A: Dict = partial( __lowercase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_a=1_7_4_4 , w_a=6_2_0.8_3 , w_m=2.5_2 ) ) ) , ) if model_name: convert_weight_and_push( __lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __lowercase , __lowercase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( __lowercase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __lowercase , __lowercase , __lowercase , ) return config, expected_shape if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported regnet* architecture,''' ''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
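A usage sketch mirroring the argparse entry point of the conversion script above; the output directory is arbitrary, and the arguments are passed positionally because the original keyword names are obfuscated in this dump.

from pathlib import Path

# Convert a single checkpoint and skip the hub push (third argument).
convert_weights_and_push(Path("./converted"), "regnet-y-040", False)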
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
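A quick sketch of what the tests above assert: get_imports statically parses a Python file and skips imports guarded by try/except, so only "os" survives here.

import tempfile

from transformers.dynamic_module_utils import get_imports

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write("import os\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n")

print(get_imports(f.name))  # ["os"]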
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCamelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=1_0_0 , __lowercase=" " ) -> List[str]: A: int = text.split(__lowercase ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__lowercase ) , __lowercase )] def SCREAMING_SNAKE_CASE( __lowercase ) -> dict: A , A: Optional[int] = [], [] for title, text in zip(documents['''title'''] , documents['''text'''] ): if text is not None: for passage in split_text(__lowercase ): titles.append(title if title is not None else '''''' ) texts.append(__lowercase ) return {"title": titles, "text": texts} def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> dict: A: Any = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=__lowercase , padding='''longest''' , return_tensors='''pt''' )['''input_ids'''] A: Optional[Any] = ctx_encoder(input_ids.to(device=__lowercase ) , return_dict=__lowercase ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , ) -> Tuple: ###################################### logger.info('''Step 1 - Create the dataset''' ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way A: Optional[int] = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text'''] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words A: Optional[Any] = dataset.map(__lowercase , batched=__lowercase , num_proc=processing_args.num_proc ) # And compute the embeddings A: List[Any] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__lowercase ) A: str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) A: List[Any] = Features( {'''text''': Value('''string''' ), '''title''': Value('''string''' ), '''embeddings''': Sequence(Value('''float32''' ) )} ) # optional, save as float32 instead of float64 to save space A: int = dataset.map( partial(__lowercase , ctx_encoder=__lowercase , ctx_tokenizer=__lowercase ) , batched=__lowercase , batch_size=processing_args.batch_size , features=__lowercase , ) # And finally save your dataset A: List[str] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''' ) dataset.save_to_disk(__lowercase ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset 
###################################### logger.info('''Step 2 - Index the dataset''' ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search A: str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index('''embeddings''' , custom_index=__lowercase ) # And save the index A: Optional[int] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''' ) dataset.get_index('''embeddings''' ).save(__lowercase ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : str = field( default=str(Path(UpperCAmelCase_ ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) , metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} , ) UpperCamelCase_ : str = field( default="""facebook/rag-sequence-nq""" , metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} , ) UpperCamelCase_ : str = field( default="""facebook/dpr-ctx_encoder-multiset-base""" , metadata={ """help""": ( """The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or""" """ 'facebook/dpr-ctx_encoder-multiset-base'""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=str(Path(UpperCAmelCase_ ).parent / """test_run""" / """dummy-kb""" ) , metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} , ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": """The number of processes to use to split the documents into passages. Default is single process.""" } , ) UpperCamelCase_ : int = field( default=16 , metadata={ """help""": """The batch size to use when computing the passages embeddings using the DPR context encoder.""" } , ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : int = field( default=768 , metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} , ) UpperCamelCase_ : int = field( default=128 , metadata={ """help""": ( """The number of bi-directional links created for every new element during the HNSW index construction.""" ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCamelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCamelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
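A minimal sketch of the Faiss HNSW index built in Step 2, with random vectors standing in for the DPR embeddings; the dimensions match the defaults of IndexHnswArguments above.

import faiss
import numpy as np

d, m = 768, 128  # embedding dimension and HNSW link count
index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
embeddings = np.random.rand(1000, d).astype("float32")
index.add(embeddings)
scores, ids = index.search(embeddings[:1], k=5)  # approximate nearest neighbors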
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> Optional[Any]: A: str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), 
('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: for i in range(config.num_hidden_layers ): A: Tuple = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A: List[str] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A: Dict = in_proj_weight[ : config.hidden_size, : ] A: int = in_proj_bias[: config.hidden_size] A: Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A: int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A: Optional[int] = in_proj_weight[ -config.hidden_size :, : ] A: Optional[Any] = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE( __lowercase ) -> int: A: Optional[int] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: List[Any] = dct.pop(__lowercase ) A: int = val @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str: A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase ) A: Tuple = False A: str = False A: List[Any] = False A: Optional[int] = False if "vqa" in checkpoint_url: A: Union[str, Any] = True A: Union[str, Any] = 3_1_2_9 A: List[Any] = '''huggingface/label-files''' A: Any = '''vqa2-id2label.json''' A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()} A: Any = idalabel A: Optional[Any] = {v: k for k, v in idalabel.items()} A: List[str] = ViltForQuestionAnswering(__lowercase ) elif "nlvr" in checkpoint_url: A: Dict = True A: str = 2 A: Union[str, Any] = {0: '''False''', 1: '''True'''} A: Any = {v: k for k, v in config.idalabel.items()} A: Optional[Any] = 3 A: Any = ViltForImagesAndTextClassification(__lowercase ) elif "irtr" in checkpoint_url: A: Tuple = True A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase ) elif "mlm_itm" in checkpoint_url: A: Tuple = True A: Optional[int] = ViltForMaskedLM(__lowercase ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict'''] A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase ) if mlm_model or irtr_model: A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] 
for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) # load state dict into HuggingFace model model.eval() if mlm_model: A , A: Union[str, Any] = model.load_state_dict(__lowercase , strict=__lowercase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(__lowercase ) # Define processor A: Optional[Any] = ViltImageProcessor(size=3_8_4 ) A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' ) A: Optional[int] = ViltProcessor(__lowercase , __lowercase ) # Forward pass on example inputs (image + text) if nlvr_model: A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: Any = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[str] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw ) if mlm_model: A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].''' else: A: Optional[int] = '''How many cats are there?''' A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: Any = model(**__lowercase ) # Verify outputs if mlm_model: A: Any = torch.Size([1, 1_1, 3_0_5_2_2] ) A: Tuple = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify masked token prediction equals "cats" A: List[str] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: A: Any = torch.Size([1, 3_1_2_9] ) A: Optional[int] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify vqa prediction equals "2" A: Dict = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: A: Union[str, Any] = torch.Size([1, 2] ) A: Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
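After conversion, a checkpoint produced by this script is used like any other ViLT model; the VQA checkpoint name below is the published one and the image URL already appears in the script above.

import requests
from PIL import Image
from transformers import ViltForQuestionAnswering, ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
encoding = processor(image, "How many cats are there?", return_tensors="pt")
outputs = model(**encoding)
print(model.config.id2label[outputs.logits.argmax(-1).item()])  # "2"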
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP UpperCamelCase = False try: UpperCamelCase = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str = None , SCREAMING_SNAKE_CASE_ : list = [] ) -> int: '''simple docstring''' A: Any = 0 A: Dict = choices A: Optional[int] = prompt if sys.platform == "win32": A: Optional[int] = '''*''' else: A: int = '''➔ ''' def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str = "" ) -> List[str]: '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , SCREAMING_SNAKE_CASE_ ) else: forceWrite(self.choices[index] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: '''simple docstring''' if index == self.position: forceWrite(f""" {self.arrow_char} """ ) self.write_choice(SCREAMING_SNAKE_CASE_ ) else: forceWrite(f""" {self.choices[index]}""" ) reset_cursor() def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Direction , SCREAMING_SNAKE_CASE_ : int = 1 ) -> str: '''simple docstring''' A: Tuple = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(SCREAMING_SNAKE_CASE_ ) move_cursor(SCREAMING_SNAKE_CASE_ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP['''up'''] ) def _snake_case ( self : str ) -> Optional[int]: '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP['''down'''] ) def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP['''newline'''] ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' move_cursor(len(self.choices ) - self.position , '''DOWN''' ) return self.position @input.mark(KEYMAP['''interrupt'''] ) def _snake_case ( self : Tuple ) -> int: '''simple docstring''' move_cursor(len(self.choices ) - self.position , '''DOWN''' ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(SCREAMING_SNAKE_CASE_ )] for number in range(10 )] ) def _snake_case ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' A: Dict = int(chr(self.current_selection ) ) A: Dict = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , SCREAMING_SNAKE_CASE_ ) else: return else: return def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : int = 0 ) -> Union[str, Any]: '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , '''\n''' ) if in_colab: forceWrite('''Please input a choice index (starting from 0), and press enter''' , '''\n''' ) else: forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' , '''\n''' ) A: Union[str, Any] = default_choice for i in range(len(self.choices ) ): self.print_choice(SCREAMING_SNAKE_CASE_ ) forceWrite('''\n''' ) move_cursor(len(self.choices ) - self.position , '''UP''' ) with cursor.hide(): while 
True: if in_colab: try: A: str = int(builtins.input() ) except ValueError: A: List[str] = default_choice else: A: Optional[int] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , '''UP''' ) clear_line() self.write_choice(SCREAMING_SNAKE_CASE_ , '''\n''' ) return choice
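A hypothetical usage of the menu class above; the name BulletMenu and the run() entry point follow the accelerate CLI's menu API, which this file appears to vendor, so treat both names as assumptions.

# Renders the choices, handles arrow/number keys, and returns the chosen index.
menu = BulletMenu("Pick a mixed-precision mode:", ["no", "fp16", "bf16"])
choice = menu.run(default_choice=0)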
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } UpperCamelCase = { '''b0''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict: A: Tuple = EfficientNetConfig() A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim'''] A: Optional[int] = CONFIG_MAP[model_name]['''width_coef'''] A: str = CONFIG_MAP[model_name]['''depth_coef'''] A: Dict = CONFIG_MAP[model_name]['''image_size'''] A: str = CONFIG_MAP[model_name]['''dropout_rate'''] A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding'''] A: Optional[Any] = '''huggingface/label-files''' A: List[str] = '''imagenet-1k-id2label.json''' A: Dict = 1_0_0_0 A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()} A: int = idalabel A: Tuple = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE( ) -> Any: A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: List[str] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , ) return preprocessor def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: 
List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] A: List[str] = sorted(set(__lowercase ) ) A: Dict = len(__lowercase ) A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )} A: Optional[int] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: A: int = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) A: Union[str, Any] = {} for item in rename_keys: if item[0] in 
original_param_names: A: str = '''efficientnet.''' + item[1] A: int = '''classifier.weight''' A: Tuple = '''classifier.bias''' return key_mapping def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue A: Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: A: List[str] = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A: List[Any] = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A: Optional[Any] = torch.from_numpy(np.transpose(__lowercase ) ) else: A: Any = torch.from_numpy(__lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__lowercase ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple: A: Optional[int] = model_classes[model_name]( include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , ) A: List[str] = original_model.trainable_variables A: Optional[Any] = original_model.non_trainable_variables A: Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A: int = param.numpy() A: Tuple = list(tf_params.keys() ) # Load HuggingFace model A: Dict = get_efficientnet_config(__lowercase ) A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval() A: Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) A: int = rename_keys(__lowercase ) replace_params(__lowercase , __lowercase , __lowercase ) # Initialize preprocessor and preprocess input image A: List[Any] = convert_image_processor(__lowercase ) A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): A: str = hf_model(**__lowercase ) A: List[Any] = outputs.logits.detach().numpy() # Original model inference A: Any = False A: List[Any] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A: str = image.img_to_array(__lowercase ) A: Dict = np.expand_dims(__lowercase , axis=0 ) A: Any = original_model.predict(__lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(__lowercase ): os.mkdir(__lowercase ) # Save converted model and image processor hf_model.save_pretrained(__lowercase ) preprocessor.save_pretrained(__lowercase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) A: int = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(__lowercase ) hf_model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') UpperCamelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
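A direct call mirroring the argparse entry point above, passed positionally (model name, dump folder, save_model, push_to_hub); "b0" and "hf_model" are just the script's defaults.

convert_efficientnet_checkpoint("b0", "hf_model", True, False)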
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law: P = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law: V = nRT / P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
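A worked example using the functions above: one mole of an ideal gas at 273.15 K in 0.0224 m^3 sits near atmospheric pressure.

# P = nRT / V = 1 * 8.314462 * 273.15 / 0.0224
print(pressure_of_gas_system(1.0, 273.15, 0.0224))  # ~101388 Pa, i.e. ~101 kPa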
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
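From a user's perspective, the lazy-module machinery above is invisible: the classes import from the top-level package and are only resolved on first access.

from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig()  # resolved lazily through _import_structure
model = FocalNetModel(config)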
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    """Fractional knapsack: values vl, weights wt, capacity w, item count n."""
    # sort items by value/weight ratio, best first
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    # index of the first item that no longer fits completely
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
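A worked call of the function above on the classic textbook instance: with capacity 50 the first two items (ratios 6 and 5) are taken whole and 20/30 of the third, giving 60 + 100 + 120 * 20/30.

print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0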
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = Dict[str, Any] UpperCamelCase = List[Prediction] @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Any = {} if "threshold" in kwargs: A: List[Any] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: int = load_image(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = torch.IntTensor([[image.height, image.width]] ) A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) A: Any = target_size return inputs def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Tuple = model_inputs.pop('''target_size''' ) A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ ) A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: A: Dict = model_inputs['''bbox'''] return model_outputs def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A , A: Union[str, Any] = target_size[0].tolist() def unnormalize(SCREAMING_SNAKE_CASE_ : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] A: Dict = ['''score''', '''label''', '''box'''] A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = raw_annotations[0] A: List[Any] = raw_annotation['''scores'''] A: List[Any] = raw_annotation['''labels'''] A: int = raw_annotation['''boxes'''] A: Any = scores.tolist() A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels] A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A: Tuple = ['''score''', '''label''', '''box'''] A: str = [ dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) A , A , A , A: str = box.int().tolist() A: str = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
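The pipeline above is normally reached through the high-level factory; DETR is a representative checkpoint for the plain object-detection branch, and the image URL is the one used elsewhere in this dump.

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
)
# [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]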
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
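A small check of the kernel-size handling in the function above: an even ksize is bumped to the next odd size so the kernel has a well-defined center.

kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
print(kernel.shape)  # (11, 11)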
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = """convbert""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Dict = vocab_size A: Tuple = hidden_size A: Optional[int] = num_hidden_layers A: List[str] = num_attention_heads A: int = intermediate_size A: int = hidden_act A: List[str] = hidden_dropout_prob A: int = attention_probs_dropout_prob A: Tuple = max_position_embeddings A: Any = type_vocab_size A: str = initializer_range A: Union[str, Any] = layer_norm_eps A: str = embedding_size A: Optional[int] = head_ratio A: List[Any] = conv_kernel_size A: List[Any] = num_groups A: Optional[int] = classifier_dropout class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A: List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
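Instantiating the config above directly; conv_kernel_size and head_ratio are the ConvBERT-specific knobs, shown here with their default values.

from transformers import ConvBertConfig, ConvBertModel

config = ConvBertConfig(conv_kernel_size=9, head_ratio=2)
model = ConvBertModel(config)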
319
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Dict = StableDiffusionXLImgaImgPipeline UpperCamelCase_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCamelCase_ : List[str] = PipelineTesterMixin.required_optional_params - {"""latents"""} UpperCamelCase_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self : Any ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) A: str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE_ , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) A: List[Any] = EulerDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) A: Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) A: Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , ) A: str = CLIPTextModel(SCREAMING_SNAKE_CASE_ ) A: List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=SCREAMING_SNAKE_CASE_ ) A: Tuple = CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=SCREAMING_SNAKE_CASE_ ) A: List[str] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any]=0 ) -> List[Any]: '''simple docstring''' A: List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = image / 2 + 0.5 
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ): A: Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: A: int = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) A: Tuple = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' A: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator A: Union[str, Any] = self.get_dummy_components() A: str = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ ) A: Optional[int] = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) A: Any = sd_pipe(**SCREAMING_SNAKE_CASE_ ).images A: Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) A: Any = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _snake_case ( self : Dict ) -> Union[str, Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' A: Union[str, Any] = self.get_dummy_components() A: int = StableDiffusionXLImgaImgPipeline(**SCREAMING_SNAKE_CASE_ ) A: Tuple = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) A: Any = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) # forward without prompt embeds A: Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = 3 * ['''this is a negative prompt'''] A: int = negative_prompt A: int = 3 * [inputs['''prompt''']] A: List[Any] = sd_pipe(**SCREAMING_SNAKE_CASE_ ) A: Dict = output.images[0, -3:, -3:, -1] # forward with prompt embeds A: Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) A: List[Any] = 3 * ['''this is a negative prompt'''] A: str = 3 * [inputs.pop('''prompt''' )] ( ( A ) , ( A ) , ( A ) , ( A ) , ): Optional[Any] = sd_pipe.encode_prompt(SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = sd_pipe( **SCREAMING_SNAKE_CASE_ , prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_prompt_embeds=SCREAMING_SNAKE_CASE_ , pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , negative_pooled_prompt_embeds=SCREAMING_SNAKE_CASE_ , ) A: str = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]="cpu" , SCREAMING_SNAKE_CASE_ : Tuple=torch.floataa , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ) -> List[str]: '''simple docstring''' A: Any = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) A: Tuple = 
np.random.RandomState(SCREAMING_SNAKE_CASE_ ).standard_normal((1, 4, 64, 64) ) A: int = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _snake_case ( self : Any ) -> str: '''simple docstring''' A: List[str] = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) A: Tuple = self.get_inputs(SCREAMING_SNAKE_CASE_ ) A: int = pipe(**SCREAMING_SNAKE_CASE_ ).images A: Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) A: Tuple = np.array([0.4_9493, 0.4_7896, 0.4_0798, 0.5_4214, 0.5_3212, 0.4_8202, 0.4_7656, 0.4_6329, 0.4_8506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
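For context, end-to-end use of the pipeline exercised by these tests looks roughly like the sketch below. It assumes a CUDA device, a local `input.png`, and the public SDXL refiner checkpoint; none of this is part of the test file itself.

import torch
from PIL import Image
from diffusers import StableDiffusionXLImg2ImgPipeline

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = Image.open("input.png").convert("RGB").resize((1024, 1024))  # hypothetical input
result = pipe(
    "A painting of a squirrel eating a burger", image=init_image, strength=0.75
).images[0]
result.save("out.png")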
319
'''simple docstring'''
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
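A quick illustration of the check (the longest side must be strictly shorter than the sum of the rest):

print(check_polygon([6, 10, 5]))     # True: 10 < 6 + 5
print(check_polygon([3, 7, 13, 2]))  # False: 13 >= 3 + 7 + 2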
319
1
'''simple docstring'''
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
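145 is the classic Krishnamurthy number, since 1! + 4! + 5! = 1 + 24 + 120 = 145; a quick check against the functions above:

assert krishnamurthy(145)
assert krishnamurthy(2)       # single digit: 2! = 2
assert not krishnamurthy(144)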
319
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
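With the custom checker registered, a doctest can opt out of output comparison via the new flag; a hypothetical example:

import time


def current_time():
    """
    >>> current_time()  # doctest: +IGNORE_RESULT
    1690000000.0
    """
    print(time.time())  # output differs every run, but the flag makes the doctest pass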
319
1
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
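The registry above is what `Dataset.set_format` (and `with_format`) consults at runtime; a short sketch, assuming `datasets` and `numpy` are installed:

from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})
ds.set_format("np")      # alias, resolved to the "numpy" formatter
print(type(ds[:]["x"]))  # numpy.ndarray
ds.set_format("pandas")  # registered directly under its canonical name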
319
'''simple docstring''' import heapq import sys import numpy as np UpperCamelCase = tuple[int, int] class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> str: '''simple docstring''' A: Any = [] A: int = set() def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' return len(self.elements ) == 0 def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(SCREAMING_SNAKE_CASE_ ) else: # update # print("update", item) A: Optional[int] = [] ((A) , (A)): str = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((A) , (A)): int = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' if item in self.set: self.set.remove(SCREAMING_SNAKE_CASE_ ) A: str = [] ((A) , (A)): List[str] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((A) , (A)): Any = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return self.elements[0][1] def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' ((A) , (A)): Dict = heapq.heappop(self.elements ) self.set.remove(SCREAMING_SNAKE_CASE_ ) return (priority, item) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: # euclidean distance A: List[str] = np.array(__lowercase ) A: Optional[int] = np.array(__lowercase ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: # integer division by time variable return consistent_heuristic(__lowercase , __lowercase ) // t def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]: A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase ) return ans def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]: A: Union[str, Any] = np.chararray((n, n) ) for i in range(__lowercase ): for j in range(__lowercase ): A: Union[str, Any] = '''*''' for i in range(__lowercase ): for j in range(__lowercase ): if (j, (n - 1) - i) in blocks: A: Optional[Any] = '''#''' A: Tuple = '''-''' A: List[str] = back_pointer[goal] while x != start: ((A) , (A)): Tuple = x # print(x) A: List[str] = '''-''' A: str = back_pointer[x] A: Dict = '''-''' for i in range(__lowercase ): for j in range(__lowercase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A: List[str] = back_pointer[goal] while x != start: print(__lowercase , end=''' ''' ) A: Optional[int] = back_pointer[x] print(__lowercase ) sys.exit() def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: if p[0] < 0 or p[0] > n - 1: return 
False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]: for itera in range(__lowercase ): open_list[itera].remove_element(__lowercase ) # print("s", s) # print("j", j) ((A) , (A)): Tuple = s A: Optional[Any] = (x - 1, y) A: str = (x + 1, y) A: List[Any] = (x, y + 1) A: int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__lowercase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__lowercase ) A: int = -1 A: int = float('''inf''' ) if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1: A: List[str] = g_function[s] + 1 A: List[str] = s if neighbours not in close_list_anchor: open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) ) if neighbours not in close_list_inad: for var in range(1 , __lowercase ): if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key( __lowercase , 0 , __lowercase , __lowercase ): open_list[j].put( __lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( ) -> Tuple: A: str = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase = make_common_ground() UpperCamelCase = blocks_blk # hyper parameters UpperCamelCase = 1 UpperCamelCase = 1 UpperCamelCase = 20 UpperCamelCase = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase = (0, 0) UpperCamelCase = (n - 1, n - 1) UpperCamelCase = 1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: int = {start: 0, goal: float('''inf''' )} A: Union[str, Any] = {start: -1, goal: -1} A: List[Any] = [] A: Union[str, Any] = set() for i in range(__lowercase ): open_list.append(PriorityQueue() ) open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) A: list[int] = [] A: list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __lowercase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A , A: Union[str, Any] = open_list[i].top_show() visited.add(__lowercase ) expand_state( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) close_list_inad.append(__lowercase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A: Union[str, Any] = open_list[0].top_show() visited.add(__lowercase ) expand_state( __lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase 
, __lowercase , ) close_list_anchor.append(__lowercase ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__lowercase ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
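At its core the search orders states by the weighted key f(s) = g(s) + W1 · h_i(s, goal), computed by `key` above. A standalone illustration of that computation on this grid (names hypothetical; `manhattan` mirrors `heuristic_2`):

def manhattan(p: tuple, goal: tuple) -> int:
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


g_function = {(0, 0): 0}
W1 = 1
# anchor key of the start state on the 20x20 grid with goal (19, 19)
print(g_function[(0, 0)] + W1 * manhattan((0, 0), (19, 19)))  # 38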
319
1
'''simple docstring'''
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
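A round-trip sanity check of the two converters:

assert int_to_roman(3999) == "MMMCMXCIX"
assert roman_to_int("MMMCMXCIX") == 3999
assert all(roman_to_int(int_to_roman(n)) == n for n in range(1, 4000))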
319
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)

            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
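For the small case from the problem statement: among unit fractions 1/d with d up to 10, 1/7 = 0.(142857) has the longest recurring cycle (six digits):

assert solution(1, 10) == 7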
319
1
'''simple docstring'''
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
319
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
'''simple docstring''' import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''artists_file''': '''artists.json''', '''lyrics_file''': '''lyrics.json''', '''genres_file''': '''genres.json''', } UpperCamelCase = { '''artists_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json''', }, '''genres_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json''', }, '''lyrics_file''': { '''jukebox''': '''https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json''', }, } UpperCamelCase = { '''jukebox''': 512, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] = VOCAB_FILES_NAMES UpperCamelCase_ : str = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_LYRIC_TOKENS_SIZES UpperCamelCase_ : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple=["v3", "v2", "v2"] , SCREAMING_SNAKE_CASE_ : List[str]=5_12 , SCREAMING_SNAKE_CASE_ : Optional[Any]=5 , SCREAMING_SNAKE_CASE_ : List[Any]="<|endoftext|>" , **SCREAMING_SNAKE_CASE_ : str , ) -> Tuple: '''simple docstring''' A: Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token super().__init__( unk_token=SCREAMING_SNAKE_CASE_ , n_genres=SCREAMING_SNAKE_CASE_ , version=SCREAMING_SNAKE_CASE_ , max_n_lyric_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Optional[int] = version A: Tuple = max_n_lyric_tokens A: Union[str, Any] = n_genres with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: List[Any] = json.load(SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: Optional[int] = json.load(SCREAMING_SNAKE_CASE_ ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: Optional[Any] = json.load(SCREAMING_SNAKE_CASE_ ) A: List[Any] = R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: A: Optional[Any] = oov.replace(R'''\-\'''' , R'''\-+\'''' ) A: Optional[Any] = regex.compile(SCREAMING_SNAKE_CASE_ ) A: Tuple = {v: k for k, v in self.artists_encoder.items()} A: Dict = {v: k for k, v in self.genres_encoder.items()} A: Optional[int] = {v: k for k, v in self.lyrics_encoder.items()} @property def _snake_case ( self : Optional[int] ) -> Tuple: '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple: '''simple docstring''' A: List[Any] = [self.artists_encoder.get(SCREAMING_SNAKE_CASE_ , 0 ) for artist in list_artists] for genres in range(len(SCREAMING_SNAKE_CASE_ ) ): A: Optional[int] = [self.genres_encoder.get(SCREAMING_SNAKE_CASE_ , 0 ) for genre in list_genres[genres]] A: Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) A: Optional[Any] = [[self.lyrics_encoder.get(SCREAMING_SNAKE_CASE_ , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Union[str, Any]: '''simple docstring''' return list(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]: '''simple docstring''' A , A , A: Optional[Any] = self.prepare_for_tokenization(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = self._tokenize(SCREAMING_SNAKE_CASE_ ) return artist, genre, lyrics def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]: '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": A: Tuple = artists[idx].lower() A: str = [genres[idx].lower()] else: A: str = self._normalize(artists[idx] ) + '''.v2''' A: Dict = [ self._normalize(SCREAMING_SNAKE_CASE_ ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": A: Any = regex.compile(R'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) A: int = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' A: List[str] = {vocab[index]: index + 1 for index in range(len(SCREAMING_SNAKE_CASE_ ) )} A: Optional[Any] = 0 A: Any = len(SCREAMING_SNAKE_CASE_ ) + 1 A: int = self.vocab A: Optional[Any] = {v: k for k, v in self.vocab.items()} A: List[Any] = '''''' else: A: Tuple = regex.compile(R'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) A: Optional[int] = self._run_strip_accents(SCREAMING_SNAKE_CASE_ ) A: List[Any] = lyrics.replace('''\\''' , '''\n''' ) A: Tuple = self.out_of_vocab.sub('''''' , SCREAMING_SNAKE_CASE_ ), [], [] return artists, genres, lyrics def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> int: '''simple docstring''' A: str = unicodedata.normalize('''NFD''' , SCREAMING_SNAKE_CASE_ ) A: List[Any] = [] for char in text: A: int = unicodedata.category(SCREAMING_SNAKE_CASE_ ) if cat == "Mn": continue 
output.append(SCREAMING_SNAKE_CASE_ ) return "".join(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> str: '''simple docstring''' A: Optional[int] = ( [chr(SCREAMING_SNAKE_CASE_ ) for i in range(ord('''a''' ) , ord('''z''' ) + 1 )] + [chr(SCREAMING_SNAKE_CASE_ ) for i in range(ord('''A''' ) , ord('''Z''' ) + 1 )] + [chr(SCREAMING_SNAKE_CASE_ ) for i in range(ord('''0''' ) , ord('''9''' ) + 1 )] + ['''.'''] ) A: str = frozenset(SCREAMING_SNAKE_CASE_ ) A: Dict = re.compile(R'''_+''' ) A: List[str] = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] ) A: Union[str, Any] = pattern.sub('''_''' , SCREAMING_SNAKE_CASE_ ).strip('''_''' ) return text def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> str: '''simple docstring''' return " ".join(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> Optional[int]: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A: List[Any] = TensorType(SCREAMING_SNAKE_CASE_ ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf A: Any = tf.constant A: int = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch A: Optional[int] = torch.tensor A: str = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 A: List[Any] = jnp.array A: Optional[Any] = _is_jax else: A: str = np.asarray A: Dict = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: A: List[str] = [inputs] if not is_tensor(SCREAMING_SNAKE_CASE_ ): A: List[Any] = as_tensor(SCREAMING_SNAKE_CASE_ ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any]="" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="pt" ) -> BatchEncoding: '''simple docstring''' A: Tuple = [0, 0, 0] A: Optional[Any] = [artist] * len(self.version ) A: Union[str, Any] = [genres] * len(self.version ) A , A , A: Union[str, Any] = self.tokenize(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A , A , A: str = self._convert_token_to_id(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: int = [-INFINITY] * len(full_tokens[-1] ) A: Dict = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=SCREAMING_SNAKE_CASE_ ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: List[str] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=SCREAMING_SNAKE_CASE_ ) ) A: Optional[int] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=SCREAMING_SNAKE_CASE_ ) ) A: List[str] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=SCREAMING_SNAKE_CASE_ ) ) return (artists_file, genres_file, lyrics_file) def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[int]: '''simple docstring''' A: Tuple = self.artists_decoder.get(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = [self.genres_decoder.get(SCREAMING_SNAKE_CASE_ ) for genre in genres_index] A: Tuple = [self.lyrics_decoder.get(SCREAMING_SNAKE_CASE_ ) for character in lyric_index] return artist, genres, lyrics
319
'''simple docstring'''
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
319
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
319
'''simple docstring'''
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
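A quick check of the recursive variant above (note it sorts in place and also returns the list):

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
print(bubble_sort(["b", "a", "c"]))  # ['a', 'b', 'c']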
319
1
'''simple docstring'''
import base64


def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_bytes: bytes) -> str:
    return base64.b64decode(encoded_bytes).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
319
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: List[Any] = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]: A: Tuple = OrderedDict() A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A: Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: int = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[int] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 5_1_2} A: List[str] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: List[str] = {'''visual_embedding_dim''': 2_0_4_8} A: Optional[int] = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: A: Optional[int] = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: str = '''nlvr''' A: Union[str, Any] = VisualBertConfig(**__lowercase ) # Load State Dict A: Union[str, Any] = load_state_dict(__lowercase ) A: str = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: Optional[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Any = VisualBertForMultipleChoice(__lowercase ) 
model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
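The converter is driven from the command line; calling the entry point directly is equivalent. The path below is illustrative only — the checkpoint file name must contain one of the recognized substrings (`pre`, `vqa`, `vcr`, `nlvr`) so the right config and head are chosen:

convert_visual_bert_checkpoint("vqa_fine_tuned.th", "./visualbert-vqa")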
319
1
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[str] = None UpperCamelCase_ : Optional[Any] = None @property def _snake_case ( self : Tuple ) -> str: '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''feature_size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''sampling_rate''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , '''padding_value''' ) ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: List[str] = self.feat_extract_tester.prepare_inputs_for_common() A: List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) A: Optional[int] = feat_extract.model_input_names[0] A: str = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ ) for x, y in zip(SCREAMING_SNAKE_CASE_ , processed_features[input_name] ) ) ) A: List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) A: Any = processed_features[input_name] if len(batch_features_input.shape ) < 3: A: Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) A: List[str] = feat_extract.model_input_names[0] A: Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) A: Union[str, Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A: Any = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _snake_case ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' A: str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=SCREAMING_SNAKE_CASE_ ) A: List[str] = self.feature_extraction_class(**self.feat_extract_dict ) A: Union[str, Any] = feat_extract.model_input_names[0] A: Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) A: Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A: Optional[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> Dict: '''simple docstring''' def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ : Optional[int] ): A: Tuple = len(input[0] ) for input_slice in input[1:]: if len(SCREAMING_SNAKE_CASE_ ) != length: return False return True def _inputs_are_equal(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, 
Any] ): if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): return False for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ) , np.asarray(SCREAMING_SNAKE_CASE_ ) , atol=1E-3 ): return False return True A: int = self.feature_extraction_class(**self.feat_extract_dict ) A: int = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ ) A: Dict = feat_extract.model_input_names[0] A: Optional[int] = BatchFeature({input_name: speech_inputs} ) A: Optional[int] = self.feat_extract_tester.seq_length_diff A: List[Any] = self.feat_extract_tester.max_seq_length + pad_diff A: List[str] = self.feat_extract_tester.min_seq_length A: int = self.feat_extract_tester.batch_size A: List[Any] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy A: int = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = input_a[input_name] A: int = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' ) A: Tuple = input_a[input_name] A: Dict = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) A: Tuple = input_a[input_name] A: Union[str, Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''np''' ) A: Optional[Any] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(SCREAMING_SNAKE_CASE_ ): feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''max_length''' )[input_name] A: Tuple = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) A: Optional[Any] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy A: Any = feat_extract.pad(SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=10 ) A: Any = input_a[input_name] A: str = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , pad_to_multiple_of=10 ) A: Optional[Any] = input_a[input_name] A: int = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = input_a[input_name] A: List[str] = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' , ) A: Union[str, Any] = input_a[input_name] self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) A: str = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(SCREAMING_SNAKE_CASE_ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if 
feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct A: Optional[Any] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any]=False ) -> Tuple: '''simple docstring''' def _inputs_have_equal_length(SCREAMING_SNAKE_CASE_ : str ): A: List[Any] = len(input[0] ) for input_slice in input[1:]: if len(SCREAMING_SNAKE_CASE_ ) != length: return False return True def _inputs_are_equal(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ): if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ): return False for input_slice_a, input_slice_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): if not np.allclose(np.asarray(SCREAMING_SNAKE_CASE_ ) , np.asarray(SCREAMING_SNAKE_CASE_ ) , atol=1E-3 ): return False return True A: List[str] = self.feature_extraction_class(**self.feat_extract_dict ) A: int = self.feat_extract_tester.prepare_inputs_for_common(numpify=SCREAMING_SNAKE_CASE_ ) A: List[str] = feat_extract.model_input_names[0] A: Tuple = BatchFeature({input_name: speech_inputs} ) # truncate to smallest A: Any = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=SCREAMING_SNAKE_CASE_ ) A: Any = input_a[input_name] A: Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) A: List[str] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) # truncate to smallest with np A: Any = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=SCREAMING_SNAKE_CASE_ , ) A: Any = input_a[input_name] A: List[Any] = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) A: Optional[int] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) # truncate to middle A: Optional[int] = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' , ) A: Dict = input_a[input_name] A: List[Any] = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=SCREAMING_SNAKE_CASE_ ) A: int = input_a[input_name] A: Dict = 
feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) A: Tuple = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(_inputs_are_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE_ ): feat_extract.pad(SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE_ ): feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , truncation=SCREAMING_SNAKE_CASE_ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(SCREAMING_SNAKE_CASE_ ): feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , truncation=SCREAMING_SNAKE_CASE_ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(SCREAMING_SNAKE_CASE_ ): feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy A: List[Any] = 12 A: int = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = input_a[input_name] A: int = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , ) A: Tuple = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of A: List[Any] = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: A: Any = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) self.assertFalse(_inputs_have_equal_length(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Union[str, Any] ) -> Dict: '''simple docstring''' self._check_padding(numpify=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> Tuple: '''simple docstring''' self._check_padding(numpify=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' self._check_truncation(numpify=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Dict: '''simple docstring''' self._check_truncation(numpify=SCREAMING_SNAKE_CASE_ ) @require_torch def _snake_case ( self : int ) -> str: '''simple docstring''' A: str = self.feature_extraction_class(**self.feat_extract_dict ) A: Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() A: Optional[int] = feat_extract.model_input_names[0] A: int = BatchFeature({input_name: speech_inputs} ) A: Optional[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''np''' )[input_name] A: Optional[int] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , 
return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _snake_case ( self : Union[str, Any] ) -> Any: '''simple docstring''' A: Dict = self.feature_extraction_class(**self.feat_extract_dict ) A: Tuple = self.feat_extract_tester.prepare_inputs_for_common() A: Tuple = feat_extract.model_input_names[0] A: Union[str, Any] = BatchFeature({input_name: speech_inputs} ) A: List[Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''np''' )[input_name] A: Union[str, Any] = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''tf''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = self.feat_extract_dict A: Dict = True A: List[Any] = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ ) A: int = self.feat_extract_tester.prepare_inputs_for_common() A: Dict = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] A: Optional[Any] = feat_extract.model_input_names[0] A: Dict = BatchFeature({input_name: speech_inputs} ) A: Any = feat_extract.pad(SCREAMING_SNAKE_CASE_ , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.feat_extract_dict A: Union[str, Any] = True A: str = self.feature_extraction_class(**SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common() A: int = [len(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] A: Optional[Any] = feat_extract.model_input_names[0] A: Tuple = BatchFeature({input_name: speech_inputs} ) A: List[str] = min(SCREAMING_SNAKE_CASE_ ) A: Any = feat_extract.pad( SCREAMING_SNAKE_CASE_ , padding='''max_length''' , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE_ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
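# Usage sketch: the pad/truncation contract these tests assert, shown with
# Wav2Vec2FeatureExtractor as one concrete SequenceFeatureExtractor (an
# illustrative choice, not the class under test): padding="max_length" with
# truncation=True pads short inputs and cuts long ones to exactly max_length.
import numpy as np
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor()  # default config
speech_inputs = [np.random.rand(n).astype(np.float32) for n in (800, 1000, 1200)]
batch = BatchFeature({extractor.model_input_names[0]: speech_inputs})
padded = extractor.pad(batch, padding="max_length", max_length=1000,
                       truncation=True, return_tensors="np")
assert padded[extractor.model_input_names[0]].shape == (3, 1000)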
'''simple docstring''' from itertools import permutations def SCREAMING_SNAKE_CASE( __lowercase ) -> bool: if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False A: int = [7, 1_1, 1_3, 1_7] for i, test in enumerate(__lowercase ): if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0: return False return True def SCREAMING_SNAKE_CASE( __lowercase = 1_0 ) -> int: return sum( int(''''''.join(map(__lowercase , __lowercase ) ) ) for num in permutations(range(__lowercase ) ) if is_substring_divisible(__lowercase ) ) if __name__ == "__main__": print(f'{solution() = }')
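# Cross-check sketch: a direct restatement of the substring-divisibility property
# for one known pandigital solution, 1406357289 — useful for sanity-testing the
# short-circuit rules above (the digit-sum test for 3 works because
# d4 + d5 + d6 is congruent to the number d4d5d6 modulo 3).
def brute_force_substring_check(num: tuple) -> bool:
    primes = (2, 3, 5, 7, 11, 13, 17)
    return all(
        int("".join(map(str, num[i + 1 : i + 4]))) % p == 0
        for i, p in enumerate(primes)
    )

assert brute_force_substring_check((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))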
'''simple docstring''' # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar UpperCamelCase = TypeVar('''T''') class lowerCAmelCase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : bool = True ) -> None: '''simple docstring''' A: dict[T, list[T]] = {} # dictionary of lists A: Optional[Any] = directed def _snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : T , SCREAMING_SNAKE_CASE_ : T ) -> GraphAdjacencyList[T]: '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(SCREAMING_SNAKE_CASE_ ) self.adj_list[destination_vertex].append(SCREAMING_SNAKE_CASE_ ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(SCREAMING_SNAKE_CASE_ ) A: str = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: A: str = [destination_vertex] A: int = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(SCREAMING_SNAKE_CASE_ ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: A: str = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: A: List[str] = [destination_vertex] A: Dict = [] return self def __repr__( self : Union[str, Any] ) -> str: '''simple docstring''' return pformat(self.adj_list )
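# Usage sketch: exercising the adjacency-list class above, assuming the `A:`
# placeholders stand for `self.adj_list` / `self.directed` as the attribute
# reads indicate; the obfuscated method name `_snake_case` corresponds to the
# original `add_edge`, and calls chain because it returns `self`.
g = lowerCAmelCase_(True)  # directed graph
g._snake_case("a", "b")._snake_case("b", "c")._snake_case("a", "c")
print(g)  # {'a': ['b', 'c'], 'b': ['c'], 'c': []}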
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCamelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCamelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE( ) -> Dict: A: Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) A: Union[str, Any] = bs[:] A: List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 A: List[Any] = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[Any] = set() A: Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A: List[Any] = char return pairs class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : 
Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: str = json.load(SCREAMING_SNAKE_CASE_ ) A: str = {v: k for k, v in self.encoder.items()} A: Union[str, Any] = errors # how to handle errors in decoding A: Optional[int] = bytes_to_unicode() A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: A: int = merges_handle.read().split('''\n''' )[1:-1] A: str = [tuple(merge.split() ) for merge in bpe_merges] A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = {} A: Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] A: str = tuple(SCREAMING_SNAKE_CASE_ ) A: str = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break A , A: Optional[Any] = bigram A: Tuple = [] A: List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: A: 
Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A: int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ ) A: Any = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: str = word return word def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): A: Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ ) A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) A: int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) A: Any = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) A: Union[str, Any] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: int = [self.cls_token_id] A: str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , 
SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Dict = [self.sep_token_id] A: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): A: List[Any] = ''' ''' + text return (text, kwargs)
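# Usage sketch: the byte-level BPE pipeline above end to end, via the published
# checkpoint (assumes network access to huggingface.co on first call). The
# <s> ... </s> wrapping comes from build_inputs_with_special_tokens above.
from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok("Hello world")["input_ids"]
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id
print(tok.convert_ids_to_tokens(ids))  # ['<s>', 'Hello', 'Ġworld', '</s>']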
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def SCREAMING_SNAKE_CASE( __lowercase="" ) -> str: A: Tuple = tempfile.mkdtemp() return os.path.join(__lowercase , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Tuple ) -> str: '''simple docstring''' A: Dict = torch.rand(12 , dtype=torch.floataa ) - 0.5 A: Tuple = AgentAudio(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor A , A: Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , torch.tensor(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 ) ) def _snake_case ( self : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5 A: Dict = get_new_path(suffix='''.wav''' ) sf.write(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1_60_00 ) A: Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : int ) -> Optional[int]: '''simple docstring''' A: int = torch.randint(0 , 2_56 , (64, 64, 3) ) A: Optional[int] = AgentImage(SCREAMING_SNAKE_CASE_ ) A: Dict = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : int ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' A: Dict = Image.open(SCREAMING_SNAKE_CASE_ ) A: int = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : Optional[Any] ) -> str: '''simple docstring''' A: List[str] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' A: int = Image.open(SCREAMING_SNAKE_CASE_ ) A: Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type 
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Union[str, Any] = '''Hey!''' A: Optional[Any] = AgentText(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
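# Usage sketch: the serialization round trip the tests above pin down, assuming
# a transformers version that ships the tools API (`floataa` in the tests reads
# as the obfuscated `float32`); requires torch and soundfile, as guarded above.
import torch
from transformers.tools.agent_types import AgentAudio

samples = torch.rand(12, dtype=torch.float32) - 0.5
audio = AgentAudio(samples)
wav_path = audio.to_string()  # writes a .wav file and returns its path
assert torch.allclose(samples, audio.to_raw(), atol=1e-4)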
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> int: if not isinstance(__lowercase , int ): raise TypeError('''only integers accepted as input''' ) else: A: str = str(abs(__lowercase ) ) A: int = [list(__lowercase ) for char in range(len(__lowercase ) )] for index in range(len(__lowercase ) ): num_transpositions[index].pop(__lowercase ) return max( int(''''''.join(list(__lowercase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
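# Worked example, assuming the `A:` placeholders bind the names the later
# statements read (num_string, num_transpositions): for 8251 the candidates
# after deleting one digit are 251, 851, 821 and 825, so the maximum is 851;
# the sign is dropped by abs() first, so -904 yields max(4, 94, 90) == 94.
assert SCREAMING_SNAKE_CASE(8251) == 851
assert SCREAMING_SNAKE_CASE(-904) == 94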
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { '''configuration_clap''': [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapAudioConfig''', '''ClapConfig''', '''ClapTextConfig''', ], '''processing_clap''': ['''ClapProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ClapModel''', '''ClapPreTrainedModel''', '''ClapTextModel''', '''ClapTextModelWithProjection''', '''ClapAudioModel''', '''ClapAudioModelWithProjection''', ] UpperCamelCase = ['''ClapFeatureExtractor'''] if TYPE_CHECKING: from .configuration_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioConfig, ClapConfig, ClapTextConfig, ) from .processing_clap import ClapProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clap import ClapFeatureExtractor from .modeling_clap import ( CLAP_PRETRAINED_MODEL_ARCHIVE_LIST, ClapAudioModel, ClapAudioModelWithProjection, ClapModel, ClapPreTrainedModel, ClapTextModel, ClapTextModelWithProjection, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
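# Pattern sketch: the deferral mechanism _LazyModule relies on, reduced to a
# minimal stand-in — the submodule import only happens on first attribute
# access (an illustrative reimplementation, not the transformers internals).
import importlib
import types

class _Lazy(types.ModuleType):
    def __init__(self, name, structure):
        super().__init__(name)
        # map each exported attribute to the module that actually defines it
        self._map = {attr: mod for mod, attrs in structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only reached when normal lookup fails, i.e. on first access
        if attr not in self._map:
            raise AttributeError(attr)
        return getattr(importlib.import_module(self._map[attr]), attr)

lazy = _Lazy("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.dumps({"ok": True}) == '{"ok": true}'
assert lazy.sqrt(9) == 3.0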
'''simple docstring''' from __future__ import annotations import math def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) A: str = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]: if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) A: Union[str, Any] = len(__lowercase ) A: str = matrix_length // 2 A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )] A: Optional[Any] = [ [a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase ) ] A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )] A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )] return top_left, top_right, bot_left, bot_right def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]: return len(__lowercase ), len(matrix[0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> None: print('''\n'''.join(str(__lowercase ) for line in matrix ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase ) == (2, 2): return default_matrix_multiplication(__lowercase , __lowercase ) A , A , A , A: Union[str, Any] = split_matrix(__lowercase ) A , A , A , A: List[Any] = split_matrix(__lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) A: Any = matrix_addition(__lowercase , __lowercase ) A: List[Any] = matrix_addition(__lowercase , __lowercase ) A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) # construct the new matrix from our 4 quadrants A: Union[str, Any] = [] for i in range(len(__lowercase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__lowercase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase )[1] != 
matrix_dimensions(__lowercase )[0]: A: int = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(__lowercase ) A: str = matrix_dimensions(__lowercase ) A: str = matrix_dimensions(__lowercase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] A: Union[str, Any] = max(*__lowercase , *__lowercase ) A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) ) A: List[Any] = matrixa A: Tuple = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) A: Any = actual_strassen(__lowercase , __lowercase ) # Removing the additional zeros for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": UpperCamelCase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
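# Sanity-check sketch, assuming the de-obfuscated bindings the call sites
# reference (strassen, actual_strassen, matrix_addition, ...): the
# divide-and-conquer result must match the schoolbook triple loop, and the
# zero padding above lets non-power-of-two shapes work.
a = [[1, 2, 3], [4, 5, 6]]
b = [[7, 8], [9, 10], [11, 12]]
naive = [[sum(a[i][k] * b[k][j] for k in range(3)) for j in range(2)] for i in range(2)]
assert strassen(a, b) == naive  # [[58, 64], [139, 154]]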
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Union[List[PIL.Image.Image], np.ndarray] UpperCamelCase_ : Optional[List[bool]] UpperCamelCase_ : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = 2 @register_to_config def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = sigma_max # setable values A: int = None A: np.IntTensor = None A: torch.FloatTensor = None # sigma(t_i) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' A: List[Any] = num_inference_steps A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) A: str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A: List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) A: Optional[Any] = sigma + gamma * sigma A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' A: Union[str, Any] = sample_hat + sigma_hat * model_output A: str = (sample_hat - pred_original_sample) / sigma_hat A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple 
docstring''' A: int = sample_prev + sigma_prev * model_output A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' raise NotImplementedError()
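# Step sketch: the stochastic "churn" from add_noise_to_input above, in plain
# NumPy — sigma is bumped to sigma_hat = sigma * (1 + gamma) and noise with
# std sqrt(sigma_hat**2 - sigma**2) is added, keeping the sample consistent
# with the higher noise level (the s_noise default mirrors the config above).
import numpy as np

def add_churn(sample, sigma, gamma, s_noise=1.007, seed=0):
    eps = s_noise * np.random.default_rng(seed).standard_normal(sample.shape)
    sigma_hat = sigma + gamma * sigma
    return sample + ((sigma_hat**2 - sigma**2) ** 0.5) * eps, sigma_hat

x_hat, sigma_hat = add_churn(np.zeros(4), sigma=10.0, gamma=0.1)
assert sigma_hat == 11.0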
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ) -> int: '''simple docstring''' A: Optional[Any] = '''ZinengTang/tvlt-base''' A: List[Any] = tempfile.mkdtemp() def _snake_case ( self : List[str] , **SCREAMING_SNAKE_CASE_ : Dict ) -> str: '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any , **SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]: '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _snake_case ( self : List[Any] ) -> int: '''simple docstring''' A: List[str] = self.get_image_processor() A: str = self.get_feature_extractor() A: int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(self.tmpdirname ) A: List[str] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any ) -> Union[str, Any]: '''simple docstring''' A: int = self.get_image_processor() A: Any = self.get_feature_extractor() A: int = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) A: Any = np.ones([1_20_00] ) A: Optional[Any] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) A: int = processor(audio=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _snake_case ( self : Dict ) -> Dict: '''simple docstring''' A: Optional[int] = self.get_image_processor() A: List[Any] = self.get_feature_extractor() A: List[Any] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = np.ones([3, 2_24, 2_24] ) A: Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) A: Any = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _snake_case ( self : str ) -> Dict: '''simple docstring''' A: Union[str, Any] = self.get_image_processor() A: Tuple = self.get_feature_extractor() A: Any = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) A: Any = np.ones([1_20_00] ) A: Dict = np.ones([3, 2_24, 2_24] ) A: Union[str, Any] = processor(audio=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A: Union[str, Any] = self.get_image_processor() A: 
List[str] = self.get_feature_extractor() A: List[Any] = TvltProcessor(image_processor=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
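# Usage sketch: a joint call mirrors the tests above — audio goes to the
# feature extractor, images to the image processor, and the outputs are merged
# (the "ZinengTang/tvlt-base" configs are downloaded on first use).
import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
inputs = processor(audio=np.ones(12_000), images=np.ones((3, 224, 224)))
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']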
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) UpperCamelCase_ : Optional[str] = field( 
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' if self.train_file is not None: A: Tuple = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A: str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]: with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f: A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())] assert len(__lowercase ) == len(__lowercase ) A: Optional[int] = {c: dataset[c] for c in dataset.column_names} A: Union[str, Any] = refs return Dataset.from_dict(__lowercase ) def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: List[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A: Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A: int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) A: Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: A: Any = {} if data_args.train_file is not None: A: int = data_args.train_file if data_args.validation_file is not None: A: Optional[int] = data_args.validation_file A: List[str] = data_args.train_file.split('''.''' )[-1] if extension == "txt": A: int = '''text''' A: Any = load_dataset(__lowercase , data_files=__lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A: Dict = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase ) elif model_args.model_name_or_path: A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: A: str = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) A: Tuple = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase ) elif model_args.model_name_or_path: A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: A: List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase ) model.resize_token_embeddings(len(__lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: A: int = datasets['''train'''].column_names else: A: str = datasets['''validation'''].column_names A: Tuple = '''text''' if '''text''' in column_names else column_names[0] A: List[str] = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(__lowercase ): # Remove empty lines A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length ) A: str = datasets.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A: Dict = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A: List[Any] = False # Data collator # This one will take care of randomly masking the tokens. 
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A: Optional[int] = Trainer( model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: A: Optional[int] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A: str = model_args.model_name_or_path else: A: List[str] = None A: str = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation A: Optional[int] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A: Optional[Any] = trainer.evaluate() A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) A: Dict = perplexity A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
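# Format sketch: what the add_chinese_references helper defined earlier in this
# script expects — the ref file holds one JSON list per line, parallel to the
# tokenized dataset rows, and is merged in as a "chinese_ref" column (the toy
# values below are illustrative, not real whole-word boundaries).
from datasets import Dataset

dataset = Dataset.from_dict({"input_ids": [[101, 102, 103], [101, 104, 103]]})
refs = [[1], [2]]  # sub-word positions that attach to the preceding whole word
dataset = Dataset.from_dict({**{c: dataset[c] for c in dataset.column_names}, "chinese_ref": refs})
assert dataset.column_names == ["input_ids", "chinese_ref"]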
'''simple docstring''' import numpy as np def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict: A: List[Any] = int(np.ceil((x_end - xa) / h ) ) A: int = np.zeros((n + 1,) ) A: Tuple = ya A: Dict = xa for k in range(__lowercase ): A: Tuple = f(__lowercase , y[k] ) A: Any = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) A: int = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) A: List[str] = f(x + h , y[k] + h * ka ) A: Tuple = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
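# Usage sketch: integrating y' = y from y(0) = 1 with step 0.1 up to x = 1,
# assuming the original parameter order (f, y0, x0, h, x_end) behind the
# obfuscated signature; RK4's fourth-order accuracy puts y[-1] well within
# 1e-4 of e.
import numpy as np

y = SCREAMING_SNAKE_CASE(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
assert abs(y[-1] - np.e) < 1e-4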
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
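# A minimal, standalone sketch of the vocab-fixture pattern used in the setUp above:
# write a token-to-id mapping to a JSON vocab file, then read it back. The
# "vocab.json" file name mirrors what VOCAB_FILES_NAMES typically points to in
# transformers, but is an assumption here.
import json
import os
import tempfile

tokens = "<s> <pad> </s> <unk> n s t".split(" ")
vocab = {tok: idx for idx, tok in enumerate(tokens)}

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.json")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        fp.write(json.dumps(vocab) + "\n")
    with open(vocab_file, encoding="utf-8") as fp:
        loaded = json.load(fp)
    assert loaded["<pad>"] == 1  # ids follow the order of the token list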
319
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
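# Hedged usage sketch, assuming the file above matches the FocalNetConfig
# released in transformers, so the installed class is imported rather than
# this local copy.
from transformers import FocalNetConfig

config = FocalNetConfig(depths=[2, 2, 6, 2], embed_dim=96)
# stage_names is derived from len(depths): one "stem" plus one entry per stage.
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
# With out_features/out_indices left as None, get_aligned_output_features_output_indices
# should default the backbone output to the last stage.
print(config.out_features)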
319
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
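# The same deprecation-alias idea in a self-contained sketch (not the
# transformers implementation): subclass the replacement and warn on init.
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


legacy = OldFeatureExtractor(size=384)  # still usable, but emits a FutureWarning
assert legacy.size == 384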
319
1
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` using a sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Sieve odd candidates only; even numbers above 2 are never prime.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Largest prime below ``ceiling`` that is the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Only runs at least as long as the best one found so far can improve it.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
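# Quick sanity checks for the two helpers above, small enough to verify by hand.
assert prime_sieve(20) == [2, 3, 5, 7, 11, 13, 17, 19]
# 2 + 3 + 5 + 7 + 11 + 13 = 41 is prime and is the longest such run below 100.
assert solution(100) == 41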
319
'''simple docstring''' import os import pytest from transformers.dynamic_module_utils import get_imports UpperCamelCase = ''' import os ''' UpperCamelCase = ''' def foo(): import os return False ''' UpperCamelCase = ''' def foo(): def bar(): if True: import os return False return bar() ''' UpperCamelCase = ''' import os try: import bar except ImportError: raise ValueError() ''' UpperCamelCase = ''' import os def foo(): try: import bar except ImportError: raise ValueError() ''' UpperCamelCase = ''' import os try: import bar except (ImportError, AttributeError): raise ValueError() ''' UpperCamelCase = ''' import os try: import bar except ImportError as e: raise ValueError() ''' UpperCamelCase = ''' import os try: import bar except: raise ValueError() ''' UpperCamelCase = ''' import os try: import bar import baz except ImportError: raise ValueError() ''' UpperCamelCase = ''' import os try: import bar import baz except ImportError: x = 1 raise ValueError() ''' UpperCamelCase = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize('''case''' , __lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: A: Tuple = os.path.join(__lowercase , '''test_file.py''' ) with open(__lowercase , '''w''' ) as _tmp_file: _tmp_file.write(__lowercase ) A: List[Any] = get_imports(__lowercase ) assert parsed_imports == ["os"]
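# Standalone illustration of the behavior the cases above assert: get_imports
# skips imports guarded by try/except ImportError, so only "os" is reported.
import tempfile

from transformers.dynamic_module_utils import get_imports

source = "import os\ntry:\n    import bar\nexcept ImportError:\n    pass\n"
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as f:
    f.write(source)
assert get_imports(f.name) == ["os"]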
319
1
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
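# Offline sketch of the same scrape-and-extract pattern on canned HTML, so it
# runs without hitting imdb.com (the markup below is invented for illustration).
from bs4 import BeautifulSoup

html = (
    "<table><tr>"
    "<td class='titleColumn'><a>The Example</a></td>"
    "<td class='ratingColumn imdbRating'><strong>9.2</strong></td>"
    "</tr></table>"
)
soup = BeautifulSoup(html, "html.parser")
titles = soup.find_all("td", class_="titleColumn")
ratings = soup.find_all("td", class_="ratingColumn imdbRating")
assert {t.a.text: float(r.strong.text) for t, r in zip(titles, ratings)} == {"The Example": 9.2}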
319
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> Optional[Any]: A: str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), 
('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: for i in range(config.num_hidden_layers ): A: Tuple = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A: List[str] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A: Dict = in_proj_weight[ : config.hidden_size, : ] A: int = in_proj_bias[: config.hidden_size] A: Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A: int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A: Optional[int] = in_proj_weight[ -config.hidden_size :, : ] A: Optional[Any] = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE( __lowercase ) -> int: A: Optional[int] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: List[Any] = dct.pop(__lowercase ) A: int = val @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str: A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase ) A: Tuple = False A: str = False A: List[Any] = False A: Optional[int] = False if "vqa" in checkpoint_url: A: Union[str, Any] = True A: Union[str, Any] = 3_1_2_9 A: List[Any] = '''huggingface/label-files''' A: Any = '''vqa2-id2label.json''' A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()} A: Any = idalabel A: Optional[Any] = {v: k for k, v in idalabel.items()} A: List[str] = ViltForQuestionAnswering(__lowercase ) elif "nlvr" in checkpoint_url: A: Dict = True A: str = 2 A: Union[str, Any] = {0: '''False''', 1: '''True'''} A: Any = {v: k for k, v in config.idalabel.items()} A: Optional[Any] = 3 A: Any = ViltForImagesAndTextClassification(__lowercase ) elif "irtr" in checkpoint_url: A: Tuple = True A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase ) elif "mlm_itm" in checkpoint_url: A: Tuple = True A: Optional[int] = ViltForMaskedLM(__lowercase ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict'''] A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase ) if mlm_model or irtr_model: A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] 
for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) # load state dict into HuggingFace model model.eval() if mlm_model: A , A: Union[str, Any] = model.load_state_dict(__lowercase , strict=__lowercase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(__lowercase ) # Define processor A: Optional[Any] = ViltImageProcessor(size=3_8_4 ) A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' ) A: Optional[int] = ViltProcessor(__lowercase , __lowercase ) # Forward pass on example inputs (image + text) if nlvr_model: A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: Any = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[str] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw ) if mlm_model: A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].''' else: A: Optional[int] = '''How many cats are there?''' A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: Any = model(**__lowercase ) # Verify outputs if mlm_model: A: Any = torch.Size([1, 1_1, 3_0_5_2_2] ) A: Tuple = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify masked token prediction equals "cats" A: List[str] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: A: Any = torch.Size([1, 3_1_2_9] ) A: Optional[int] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify vqa prediction equals "2" A: Dict = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: A: Union[str, Any] = torch.Size([1, 2] ) A: Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
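# A minimal sketch of the q/k/v split that read_in_q_k_v performs: timm
# checkpoints store one fused (3 * hidden, hidden) projection, while the HF
# layout wants separate query/key/value slices (toy sizes for illustration).
import torch

hidden_size = 4
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(
    3 * hidden_size, hidden_size
)

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)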
319
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
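# Self-contained sketch of the lazy-import idea behind _LazyModule: attribute
# access triggers the real import, so heavy submodules load only when used.
# This is a simplified stand-in, not the transformers implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the module that actually defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, name):
        module_name = self._symbol_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so the import runs only once
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.dumps({"ok": True}) == '{"ok": true}'  # json imported on first access
assert lazy.sqrt(9.0) == 3.0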
319
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } UpperCamelCase = { '''b0''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict: A: Tuple = EfficientNetConfig() A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim'''] A: Optional[int] = CONFIG_MAP[model_name]['''width_coef'''] A: str = CONFIG_MAP[model_name]['''depth_coef'''] A: Dict = CONFIG_MAP[model_name]['''image_size'''] A: str = CONFIG_MAP[model_name]['''dropout_rate'''] A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding'''] A: Optional[Any] = '''huggingface/label-files''' A: List[str] = '''imagenet-1k-id2label.json''' A: Dict = 1_0_0_0 A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()} A: int = idalabel A: Tuple = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE( ) -> Any: A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: List[str] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , ) return preprocessor def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: 
List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] A: List[str] = sorted(set(__lowercase ) ) A: Dict = len(__lowercase ) A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )} A: Optional[int] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: A: int = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) A: Union[str, Any] = {} for item in rename_keys: if item[0] in 
original_param_names: A: str = '''efficientnet.''' + item[1] A: int = '''classifier.weight''' A: Tuple = '''classifier.bias''' return key_mapping def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue A: Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: A: List[str] = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A: List[Any] = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A: Optional[Any] = torch.from_numpy(np.transpose(__lowercase ) ) else: A: Any = torch.from_numpy(__lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__lowercase ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple: A: Optional[int] = model_classes[model_name]( include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , ) A: List[str] = original_model.trainable_variables A: Optional[Any] = original_model.non_trainable_variables A: Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A: int = param.numpy() A: Tuple = list(tf_params.keys() ) # Load HuggingFace model A: Dict = get_efficientnet_config(__lowercase ) A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval() A: Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) A: int = rename_keys(__lowercase ) replace_params(__lowercase , __lowercase , __lowercase ) # Initialize preprocessor and preprocess input image A: List[Any] = convert_image_processor(__lowercase ) A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): A: str = hf_model(**__lowercase ) A: List[Any] = outputs.logits.detach().numpy() # Original model inference A: Any = False A: List[Any] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A: str = image.img_to_array(__lowercase ) A: Dict = np.expand_dims(__lowercase , axis=0 ) A: Any = original_model.predict(__lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(__lowercase ): os.mkdir(__lowercase ) # Save converted model and image processor hf_model.save_pretrained(__lowercase ) preprocessor.save_pretrained(__lowercase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) A: int = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(__lowercase ) hf_model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') UpperCamelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
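# Sketch of the layout conversion that replace_params applies to conv kernels:
# TF stores (H, W, C_in, C_out); PyTorch expects (C_out, C_in, H, W), hence
# the permute(3, 2, 0, 1) above (toy shapes for illustration).
import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 8, 16).astype(np.float32)  # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (16, 8, 3, 3)  # C_out, C_in, H, W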
319
1
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : List[str] = None UpperCamelCase_ : Tuple = BloomTokenizerFast UpperCamelCase_ : Dict = BloomTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : List[str] = False UpperCamelCase_ : Any = """tokenizer_file""" UpperCamelCase_ : Optional[int] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def _snake_case ( self : List[Any] ) -> List[str]: '''simple docstring''' super().setUp() A: List[str] = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ) -> Optional[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: List[str] = self.get_rust_tokenizer() A: List[Any] = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>'''] A: int = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]] A: List[str] = tokenizer.batch_encode_plus(SCREAMING_SNAKE_CASE_ )['''input_ids'''] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : str=6 ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A: Optional[int] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input A: Any = '''This is a simple input''' A: Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] A: List[str] = ('''This is a simple input''', '''This is a pair''') A: Optional[Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests try: tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) tokenizer_r.encode(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) tokenizer_r.batch_encode_plus(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) except ValueError: self.fail('''Bloom Tokenizer should be able to deal with padding''' ) A: Optional[Any] = None # Hotfixing padding = None self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' ) # Simple input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' ) # Simple input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , 
max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' ) # Pair input self.assertRaises(SCREAMING_SNAKE_CASE_ , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' ) # Pair input self.assertRaises( SCREAMING_SNAKE_CASE_ , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding='''max_length''' , ) def _snake_case ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' A: Union[str, Any] = self.get_rust_tokenizer() A: Any = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = next(iter(SCREAMING_SNAKE_CASE_ ) )['''premise'''] # pick up one data A: Tuple = list(sample_data.values() ) A: Any = list(map(tokenizer.encode , SCREAMING_SNAKE_CASE_ ) ) A: int = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) for x in output_tokens] self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> List[str]: '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
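# Round-trip sketch mirroring the first test above; it downloads the
# bigscience/tokenizer files on first use, so it needs network access.
from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
texts = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
ids = tokenizer.batch_encode_plus(texts)["input_ids"]
assert tokenizer.batch_decode(ids) == texts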
319
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Element-wise ReLU: max(0, x) for each entry."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
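# Companion sketch: the subgradient used for ReLU in backpropagation is an
# indicator on the positive entries.
import numpy as np


def relu_grad(vector: np.ndarray) -> np.ndarray:
    return (vector > 0).astype(vector.dtype)


print(relu_grad(np.array([-2.0, 0.0, 3.0])))  # --> [0. 0. 1.]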
319
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = Dict[str, Any] UpperCamelCase = List[Prediction] @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Any = {} if "threshold" in kwargs: A: List[Any] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: int = load_image(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = torch.IntTensor([[image.height, image.width]] ) A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) A: Any = target_size return inputs def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Tuple = model_inputs.pop('''target_size''' ) A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ ) A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: A: Dict = model_inputs['''bbox'''] return model_outputs def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A , A: Union[str, Any] = target_size[0].tolist() def unnormalize(SCREAMING_SNAKE_CASE_ : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] A: Dict = ['''score''', '''label''', '''box'''] A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = raw_annotations[0] A: List[Any] = raw_annotation['''scores'''] A: List[Any] = raw_annotation['''labels'''] A: int = raw_annotation['''boxes'''] A: Any = scores.tolist() A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels] A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A: Tuple = ['''score''', '''label''', '''box'''] A: str = [ dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) A , A , A , A: str = box.int().tolist() A: str = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
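# Standalone sketch of the rescaling done in `unnormalize` above: LayoutLM-style
# boxes live on a 0-1000 grid and are mapped back to pixel coordinates.
def unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }


print(unnormalize_box((100, 200, 500, 800), width=640, height=480))
# {'xmin': 64, 'ymin': 96, 'xmax': 320, 'ymax': 384}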
319
1
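The LayoutLM branch of the object-detection pipeline above rescales token boxes from the model's normalized 0-1000 coordinate grid back to pixel coordinates before thresholding. A minimal standalone sketch of that rescaling step, assuming a box given as [x0, y0, x1, y1] on the 0-1000 scale (the helper name is illustrative, not part of the pipeline API):

import torch


def unnormalize_box(bbox: "torch.Tensor", height: int, width: int) -> dict:
    # LayoutLM-style boxes live on a 0-1000 grid; scale them back to pixels.
    xmin = int(width * bbox[0] / 1000)
    ymin = int(height * bbox[1] / 1000)
    xmax = int(width * bbox[2] / 1000)
    ymax = int(height * bbox[3] / 1000)
    return {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}


print(unnormalize_box(torch.tensor([250.0, 100.0, 500.0, 200.0]), height=800, width=600))
# {'xmin': 150, 'ymin': 80, 'xmax': 300, 'ymax': 160}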
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCamelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-classification/requirements.txt''') UpperCamelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def SCREAMING_SNAKE_CASE( __lowercase ) -> List[str]: with open(__lowercase , '''rb''' ) as f: A: int = Image.open(__lowercase ) return im.convert('''RGB''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the training data."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """A folder containing the validation data."""} ) UpperCamelCase_ : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( '''You must specify either a dataset name from the hub or a train and/or validation directory.''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : str = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = 
field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : str = field(default=UpperCAmelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: A: str = torch.stack([example['''pixel_values'''] for example in examples] ) A: int = torch.tensor([example['''labels'''] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def SCREAMING_SNAKE_CASE( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: str = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_image_classification''' , __lowercase , __lowercase ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A: Tuple = training_args.get_process_log_level() logger.setLevel(__lowercase ) transformers.utils.logging.set_verbosity(__lowercase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. A: Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. 
""" '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: A: List[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , ) else: A: List[Any] = {} if data_args.train_dir is not None: A: List[str] = os.path.join(data_args.train_dir , '''**''' ) if data_args.validation_dir is not None: A: Any = os.path.join(data_args.validation_dir , '''**''' ) A: Dict = load_dataset( '''imagefolder''' , data_files=__lowercase , cache_dir=model_args.cache_dir , task='''image-classification''' , ) # If we don't have a validation split, split off a percentage of train as validation. A: Tuple = None if '''validation''' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __lowercase ) and data_args.train_val_split > 0.0: A: Optional[Any] = dataset['''train'''].train_test_split(data_args.train_val_split ) A: List[Any] = split['''train'''] A: str = split['''test'''] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A: Optional[int] = dataset['''train'''].features['''labels'''].names A , A: List[str] = {}, {} for i, label in enumerate(__lowercase ): A: List[str] = str(__lowercase ) A: int = label # Load the accuracy metric from the datasets package A: Dict = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(__lowercase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) A: List[str] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel=__lowercase , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A: int = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) A: Union[str, Any] = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: A: str = image_processor.size['''shortest_edge'''] else: A: Any = (image_processor.size['''height'''], image_processor.size['''width''']) A: str = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) A: Tuple = Compose( [ RandomResizedCrop(__lowercase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) A: Union[str, Any] = Compose( [ Resize(__lowercase ), CenterCrop(__lowercase ), ToTensor(), normalize, ] ) def train_transforms(__lowercase ): A: Union[str, Any] = [ _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image'''] ] return example_batch def val_transforms(__lowercase ): A: List[str] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: A: List[Any] = ( dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(__lowercase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: A: Tuple = ( dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(__lowercase ) # Initalize our trainer A: Optional[int] = Trainer( model=__lowercase , args=__lowercase , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=__lowercase , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: A: Optional[Any] = None if training_args.resume_from_checkpoint is not None: A: Dict = training_args.resume_from_checkpoint elif last_checkpoint is not None: A: List[str] = last_checkpoint A: Optional[Any] = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: A: Dict = trainer.evaluate() trainer.log_metrics('''eval''' , __lowercase ) trainer.save_metrics('''eval''' , __lowercase ) # Write model card and (optionally) push to hub A: int = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''image-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''image-classification''', '''vision'''], } if training_args.push_to_hub: trainer.push_to_hub(**__lowercase ) else: trainer.create_model_card(**__lowercase ) if __name__ == "__main__": main()
319
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = """convbert""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Dict = vocab_size A: Tuple = hidden_size A: Optional[int] = num_hidden_layers A: List[str] = num_attention_heads A: int = intermediate_size A: int = hidden_act A: List[str] = hidden_dropout_prob A: int = attention_probs_dropout_prob A: Tuple = max_position_embeddings A: Any = type_vocab_size A: str = initializer_range A: Union[str, Any] = layer_norm_eps A: str = embedding_size A: Optional[int] = head_ratio A: List[Any] = conv_kernel_size A: List[Any] = num_groups A: Optional[int] = classifier_dropout class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A: List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
319
1
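The image-classification example above batches examples with a collate function that stacks per-image tensors and collects labels. A quick self-contained check of that pattern, with dummy data standing in for a real dataset (shapes are illustrative):

import torch


def collate_fn(examples):
    # Stack per-example image tensors into one batch and collect the labels.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


batch = collate_fn([{"pixel_values": torch.rand(3, 224, 224), "labels": i % 2} for i in range(4)])
print(batch["pixel_values"].shape)  # torch.Size([4, 3, 224, 224])
print(batch["labels"])              # tensor([0, 1, 0, 1])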
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, 
globals()['''__file__'''], _import_structure, module_spec=__spec__)
319
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon.

    A polygon is valid when its longest side is strictly shorter than the
    sum of all the other sides.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = sorted(nums)
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
1
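A quick usage check for the check_polygon helper above, showing both outcomes of the generalized triangle inequality:

print(check_polygon([6, 10, 5]))     # True  (10 < 6 + 5)
print(check_polygon([3, 7, 13, 2]))  # False (13 >= 3 + 7 + 2)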
from __future__ import annotations


def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    # subset[i][j] is True when a subset of the first i elements sums to j
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # For each arr value, a sum of zero(0) can be formed by not taking any
    # element, hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # Sum is not zero and set is empty, then False
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase = abspath(join(dirname(__file__), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: from transformers.testing_utils import pytest_terminal_summary_main A: Optional[int] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__lowercase , id=__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: A: Tuple = 0 # Doctest custom flag to ignore output. UpperCamelCase = doctest.register_optionflag('''IGNORE_RESULT''') UpperCamelCase = doctest.OutputChecker class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str: '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase = CustomOutputChecker UpperCamelCase = HfDoctestModule UpperCamelCase = HfDocTestParser
319
1
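A short demonstration of the subset-sum table above:

print(is_sum_subset([3, 34, 4, 12, 5, 2], 9))   # True  (4 + 5)
print(is_sum_subset([3, 34, 4, 12, 5, 2], 30))  # False (no subset reaches 30)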
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = '''▁''' UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''}, '''tokenizer_file''': { '''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json''' }, } UpperCamelCase = { '''google/pegasus-xsum''': 512, } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = PegasusTokenizer UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="</s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : Dict="<mask_2>" , SCREAMING_SNAKE_CASE_ : List[Any]="<mask_1>" , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=1_03 , **SCREAMING_SNAKE_CASE_ : Any , ) -> Dict: '''simple docstring''' A: Tuple = offset if additional_special_tokens is not None: if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise TypeError( f"""additional_special_tokens should be of type {type(SCREAMING_SNAKE_CASE_ )}, but is""" f""" {type(SCREAMING_SNAKE_CASE_ )}""" ) A: str = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f"""<unk_{i}>""" for i in range(len(SCREAMING_SNAKE_CASE_ ) , self.offset - 1 ) ] if len(set(SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' f""" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.""" ) A: List[Any] = additional_special_tokens_extended else: A: Dict = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )] super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , mask_token_sent=SCREAMING_SNAKE_CASE_ , offset=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Optional[int] = vocab_file A: Dict = False if not self.vocab_file else True def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> str: '''simple docstring''' A: List[str] = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" ) return [1 if x in all_special_ids else 0 for x in seq] def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List , SCREAMING_SNAKE_CASE_ : Optional[List] = None , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(SCREAMING_SNAKE_CASE_ ) elif token_ids_a is None: return self._special_token_mask(SCREAMING_SNAKE_CASE_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
319
'''simple docstring''' import heapq import sys import numpy as np UpperCamelCase = tuple[int, int] class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> str: '''simple docstring''' A: Any = [] A: int = set() def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' return len(self.elements ) == 0 def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(SCREAMING_SNAKE_CASE_ ) else: # update # print("update", item) A: Optional[int] = [] ((A) , (A)): str = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((A) , (A)): int = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' if item in self.set: self.set.remove(SCREAMING_SNAKE_CASE_ ) A: str = [] ((A) , (A)): List[str] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((A) , (A)): Any = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return self.elements[0][1] def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' ((A) , (A)): Dict = heapq.heappop(self.elements ) self.set.remove(SCREAMING_SNAKE_CASE_ ) return (priority, item) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: # euclidean distance A: List[str] = np.array(__lowercase ) A: Optional[int] = np.array(__lowercase ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: # integer division by time variable return consistent_heuristic(__lowercase , __lowercase ) // t def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]: A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase ) return ans def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]: A: Union[str, Any] = np.chararray((n, n) ) for i in range(__lowercase ): for j in range(__lowercase ): A: Union[str, Any] = '''*''' for i in range(__lowercase ): for j in range(__lowercase ): if (j, (n - 1) - i) in blocks: A: Optional[Any] = '''#''' A: Tuple = '''-''' A: List[str] = back_pointer[goal] while x != start: ((A) , (A)): Tuple = x # print(x) A: List[str] = '''-''' A: str = back_pointer[x] A: Dict = '''-''' for i in range(__lowercase ): for j in range(__lowercase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A: List[str] = back_pointer[goal] while x != start: print(__lowercase , end=''' ''' ) A: Optional[int] = back_pointer[x] print(__lowercase ) sys.exit() def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: if p[0] < 0 or p[0] > n - 1: return 
False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]: for itera in range(__lowercase ): open_list[itera].remove_element(__lowercase ) # print("s", s) # print("j", j) ((A) , (A)): Tuple = s A: Optional[Any] = (x - 1, y) A: str = (x + 1, y) A: List[Any] = (x, y + 1) A: int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__lowercase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__lowercase ) A: int = -1 A: int = float('''inf''' ) if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1: A: List[str] = g_function[s] + 1 A: List[str] = s if neighbours not in close_list_anchor: open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) ) if neighbours not in close_list_inad: for var in range(1 , __lowercase ): if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key( __lowercase , 0 , __lowercase , __lowercase ): open_list[j].put( __lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( ) -> Tuple: A: str = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase = make_common_ground() UpperCamelCase = blocks_blk # hyper parameters UpperCamelCase = 1 UpperCamelCase = 1 UpperCamelCase = 20 UpperCamelCase = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase = (0, 0) UpperCamelCase = (n - 1, n - 1) UpperCamelCase = 1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: int = {start: 0, goal: float('''inf''' )} A: Union[str, Any] = {start: -1, goal: -1} A: List[Any] = [] A: Union[str, Any] = set() for i in range(__lowercase ): open_list.append(PriorityQueue() ) open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) A: list[int] = [] A: list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __lowercase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A , A: Union[str, Any] = open_list[i].top_show() visited.add(__lowercase ) expand_state( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) close_list_inad.append(__lowercase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A: Union[str, Any] = open_list[0].top_show() visited.add(__lowercase ) expand_state( __lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase 
, __lowercase , ) close_list_anchor.append(__lowercase ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__lowercase ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
319
1
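The multi-heuristic A* above scores states with key(s) = g(s) + W1 * h_i(s, goal), mixing one consistent heuristic with two inadmissible ones. A minimal sketch of the two underlying distance measures in isolation, assuming 2-D grid points:

import numpy as np


def euclidean(p, goal):
    return np.linalg.norm(np.array(p) - np.array(goal))


def manhattan(p, goal):
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


p, goal = (0, 0), (3, 4)
print(euclidean(p, goal))  # 5.0
print(manhattan(p, goal))  # 7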
'''simple docstring''' from __future__ import annotations from typing import Any class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = 0 ) -> None: '''simple docstring''' A , A: int = row, column A: Any = [[default_value for c in range(SCREAMING_SNAKE_CASE_ )] for r in range(SCREAMING_SNAKE_CASE_ )] def __str__( self : Optional[Any] ) -> str: '''simple docstring''' A: List[str] = f"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier A: str = 0 for row_vector in self.array: for obj in row_vector: A: str = max(SCREAMING_SNAKE_CASE_ , len(str(SCREAMING_SNAKE_CASE_ ) ) ) A: int = f"""%{max_element_length}s""" # Make string and return def single_line(SCREAMING_SNAKE_CASE_ : list[float] ) -> str: nonlocal string_format_identifier A: Dict = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(SCREAMING_SNAKE_CASE_ ) for row_vector in self.array ) return s def __repr__( self : List[str] ) -> str: '''simple docstring''' return str(self ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : tuple[int, int] ) -> bool: '''simple docstring''' if not (isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and len(SCREAMING_SNAKE_CASE_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Any , SCREAMING_SNAKE_CASE_ : tuple[int, int] ) -> Any: '''simple docstring''' assert self.validate_indicies(SCREAMING_SNAKE_CASE_ ) return self.array[loc[0]][loc[1]] def __setitem__( self : int , SCREAMING_SNAKE_CASE_ : tuple[int, int] , SCREAMING_SNAKE_CASE_ : float ) -> None: '''simple docstring''' assert self.validate_indicies(SCREAMING_SNAKE_CASE_ ) A: Any = value def __add__( self : str , SCREAMING_SNAKE_CASE_ : Matrix ) -> Matrix: '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert self.row == another.row and self.column == another.column # Add A: Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A: Dict = self[r, c] + another[r, c] return result def __neg__( self : Optional[Any] ) -> Matrix: '''simple docstring''' A: Tuple = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A: Union[str, Any] = -self[r, c] return result def __sub__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Matrix ) -> Matrix: '''simple docstring''' return self + (-another) def __mul__( self : List[str] , SCREAMING_SNAKE_CASE_ : int | float | Matrix ) -> Matrix: '''simple docstring''' if isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ): # Scalar multiplication A: Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A: Optional[Any] = self[r, c] * another return result elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # Matrix multiplication assert self.column == another.row A: str = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A: Tuple = f"""Unsupported type given for another ({type(SCREAMING_SNAKE_CASE_ )})""" raise TypeError(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Matrix: '''simple docstring''' A: str = Matrix(self.column , self.row ) for r in range(self.row ): 
for c in range(self.column ): A: Optional[Any] = self[r, c] return result def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Matrix , SCREAMING_SNAKE_CASE_ : Matrix ) -> Any: '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate A: Optional[Any] = v.transpose() A: List[str] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def SCREAMING_SNAKE_CASE( ) -> None: # a^(-1) A: Optional[int] = Matrix(3 , 3 , 0 ) for i in range(3 ): A: Any = 1 print(F"""a^(-1) is {ainv}""" ) # u, v A: str = Matrix(3 , 1 , 0 ) A , A , A: Union[str, Any] = 1, 2, -3 A: Union[str, Any] = Matrix(3 , 1 , 0 ) A , A , A: Union[str, Any] = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__lowercase , __lowercase )}""" ) def SCREAMING_SNAKE_CASE( ) -> None: import doctest doctest.testmod() testa()
319
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the divisor d in [numerator, digit] for which numerator/d
    produces the longest cycle of recurring remainders."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
1
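The Matrix class above implements the Sherman-Morrison update, (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), which is defined whenever the denominator is non-zero. A quick numpy cross-check of the identity, independent of the class:

import numpy as np

rng = np.random.default_rng(0)
a = rng.random((3, 3)) + 3 * np.eye(3)  # well-conditioned, invertible A
u = rng.random((3, 1))
v = rng.random((3, 1))

a_inv = np.linalg.inv(a)
denom = 1.0 + (v.T @ a_inv @ u).item()
sherman_morrison = a_inv - (a_inv @ u @ v.T @ a_inv) / denom
direct = np.linalg.inv(a + u @ v.T)  # brute-force inverse for comparison
print(np.allclose(sherman_morrison, direct))  # True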
'''simple docstring''' import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Any ) -> int: '''simple docstring''' super().tearDown() gc.collect() def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' A: Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) A: List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) A: List[str] = '''xvjiarui/stable-diffusion-2-inpainting''' A , A: List[Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ ) A: Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench''' A: Optional[int] = jax.random.PRNGKey(0 ) A: Dict = 50 A: List[str] = jax.device_count() A: Optional[Any] = num_samples * [prompt] A: Optional[Any] = num_samples * [init_image] A: int = num_samples * [mask_image] A , A , A: int = pipeline.prepare_inputs(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # shard inputs and rng A: List[Any] = replicate(SCREAMING_SNAKE_CASE_ ) A: List[str] = jax.random.split(SCREAMING_SNAKE_CASE_ , jax.device_count() ) A: int = shard(SCREAMING_SNAKE_CASE_ ) A: str = shard(SCREAMING_SNAKE_CASE_ ) A: Dict = shard(SCREAMING_SNAKE_CASE_ ) A: Tuple = pipeline( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , jit=SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = output.images.reshape(SCREAMING_SNAKE_CASE_ , 5_12 , 5_12 , 3 ) A: Dict = images[0, 2_53:2_56, 2_53:2_56, -1] A: Any = jnp.asarray(jax.device_get(image_slice.flatten() ) ) A: Optional[int] = jnp.array( [0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
319
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
1
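The Flax inpainting test above replicates pipeline parameters across devices and shards the per-prompt inputs before the jitted call. A minimal sketch of that replicate/shard pattern on dummy data, assuming jax and flax are installed (array shapes are illustrative):

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
params = {"w": jnp.ones((4, 4))}          # stand-in for pipeline params
batch = jnp.zeros((num_devices * 2, 8))   # global batch, divisible by device count

replicated_params = replicate(params)     # one copy of the params per device
sharded_batch = shard(batch)              # leading dim split across devices
print(jax.tree_util.tree_map(lambda x: x.shape, replicated_params))  # {'w': (num_devices, 4, 4)}
print(sharded_batch.shape)                # (num_devices, 2, 8)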
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger UpperCamelCase = get_logger(__name__) UpperCamelCase = R''' Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. ''' class lowerCAmelCase_ : '''simple docstring''' @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray ) -> jnp.ndarray: '''simple docstring''' raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class lowerCAmelCase_ : '''simple docstring''' @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray ) -> jnp.ndarray: '''simple docstring''' raise NotImplementedError( f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @add_start_docstrings(SCREAMING_SNAKE_CASE_ ) def __call__( self : str , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int , **SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' for processor in self: A: Dict = inspect.signature(processor.__call__ ).parameters if len(SCREAMING_SNAKE_CASE_ ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f"""Make sure that all the required parameters: {list(function_args.keys() )} for """ f"""{processor.__class__} are passed to the logits processor.""" ) A: str = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) else: A: Optional[Any] = processor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : float ) -> Tuple: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not (temperature > 0): raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""" ) A: List[Any] = temperature def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A: List[str] = scores / self.temperature return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float = -float('''Inf''' ) , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Union[str, Any]: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) or (top_p < 0 or top_p > 1.0): raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (min_tokens_to_keep < 1): raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" ) A: Optional[Any] = top_p A: Dict = filter_value A: Dict = min_tokens_to_keep def __call__( self : List[str] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A , A: Tuple = lax.top_k(SCREAMING_SNAKE_CASE_ , scores.shape[-1] ) A: List[str] = jnp.full_like(SCREAMING_SNAKE_CASE_ , self.filter_value ) A: Optional[int] = jax.nn.softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ).cumsum(axis=-1 ) A: List[str] = cumulative_probs < self.top_p # include the token that is higher than top_p as well A: List[str] = jnp.roll(SCREAMING_SNAKE_CASE_ , 1 ) score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE_ ) # min tokens to keep A: str = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE_ ) A: Optional[int] = jnp.where(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )[-1] return next_scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : float = -float('''Inf''' ) , SCREAMING_SNAKE_CASE_ : int = 1 ) -> Dict: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or top_k <= 0: raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" ) A: str = max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Any = filter_value def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A , A: List[Any] = scores.shape A: Tuple = jnp.full(batch_size * vocab_size , self.filter_value ) A: Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check A , A: Optional[int] = lax.top_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A: Any = topk_scores.flatten() A: List[str] = topk_indices.flatten() + shift A: List[str] = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE_ ) A: Any = next_scores_flat.reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return next_scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: '''simple docstring''' A: Tuple = bos_token_id def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A: Any = jnp.full(scores.shape , -float('''inf''' ) ) A: str = 1 - jnp.bool_(cur_len - 1 ) A: Union[str, Any] = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Any: '''simple docstring''' A: str = max_length A: Optional[Any] = eos_token_id def __call__( self : str , SCREAMING_SNAKE_CASE_ : jnp.ndarray 
, SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A: Tuple = jnp.full(scores.shape , -float('''inf''' ) ) A: Optional[Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A: Tuple = jnp.where(SCREAMING_SNAKE_CASE_ , new_scores.at[:, self.eos_token_id].set(0 ) , SCREAMING_SNAKE_CASE_ ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> int: '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or min_length < 0: raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" ) if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or eos_token_id < 0: raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" ) A: Union[str, Any] = min_length A: List[Any] = eos_token_id def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A: Optional[int] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A: Dict = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple: '''simple docstring''' A: Tuple = list(SCREAMING_SNAKE_CASE_ ) A: str = begin_index def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[int]: '''simple docstring''' A: str = 1 - jnp.bool_(cur_len - self.begin_index ) A: Tuple = jnp.where(SCREAMING_SNAKE_CASE_ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : list ) -> str: '''simple docstring''' A: int = list(SCREAMING_SNAKE_CASE_ ) def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' A: List[str] = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = dict(SCREAMING_SNAKE_CASE_ ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
A: Dict = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A: Union[str, Any] = force_token_array.at[index].set(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = jnp.intaa(SCREAMING_SNAKE_CASE_ ) def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : jnp.ndarray , SCREAMING_SNAKE_CASE_ : int ) -> jnp.ndarray: '''simple docstring''' def _force_token(SCREAMING_SNAKE_CASE_ : Optional[Any] ): A: int = scores.shape[0] A: str = self.force_token_array[generation_idx] A: Any = jnp.ones_like(SCREAMING_SNAKE_CASE_ , dtype=scores.dtype ) * -float('''inf''' ) A: str = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A: List[Any] = lax.dynamic_update_slice(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (0, current_token) ) return new_scores A: Optional[Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(SCREAMING_SNAKE_CASE_ ) , lambda: scores , ) , ) return scores class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ) -> Any: '''simple docstring''' A: Optional[int] = generate_config.eos_token_id A: Dict = generate_config.no_timestamps_token_id A: List[str] = generate_config.no_timestamps_token_id + 1 A: Optional[int] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(SCREAMING_SNAKE_CASE_ , '''max_initial_timestamp_index''' ): A: Tuple = generate_config.max_initial_timestamp_index else: A: Any = model_config.vocab_size if self.max_initial_timestamp_index is None: A: Tuple = model_config.vocab_size def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]: '''simple docstring''' A: Any = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) ) def handle_pairs(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] ): A: int = jnp.where((cur_len - self.begin_index) >= 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[int] = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , SCREAMING_SNAKE_CASE_ , ) A: str = jnp.where((cur_len - self.begin_index) < 2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) return jnp.where( SCREAMING_SNAKE_CASE_ , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , SCREAMING_SNAKE_CASE_ , ) A: Union[str, Any] = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = jnp.where(cur_len == self.begin_index , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: str = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , SCREAMING_SNAKE_CASE_ , ) A: List[Any] = self.timestamp_begin + self.max_initial_timestamp_index A: List[Any] = jnp.where( SCREAMING_SNAKE_CASE_ , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ , ) # if sum of probability over timestamps is above any other token, sample timestamp A: Optional[Any] = 
jax.nn.log_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 ) def handle_cumulative_probs(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int ): A: Optional[Any] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A: Optional[int] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , SCREAMING_SNAKE_CASE_ , ) A: Dict = jax.vmap(SCREAMING_SNAKE_CASE_ )(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return scores
319
'''simple docstring''' import fire from utils import calculate_rouge, save_json def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=None , **__lowercase ) -> Any: A: Any = [x.strip() for x in open(__lowercase ).readlines()] A: Dict = [x.strip() for x in open(__lowercase ).readlines()][: len(__lowercase )] A: Union[str, Any] = calculate_rouge(__lowercase , __lowercase , **__lowercase ) if save_path is not None: save_json(__lowercase , __lowercase , indent=__lowercase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
319
1
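The ROUGE script above pairs hypothesis and reference files line by line, truncating the references to the hypothesis count before scoring. A minimal standalone sketch of that pattern; exact_match is a stand-in for the script's calculate_rouge (which lives in its local utils module), and the file paths are caller-supplied placeholders:

def exact_match(preds, refs):
    # toy stand-in metric: fraction of predictions identical to their reference
    return sum(p == r for p, r in zip(preds, refs)) / max(len(preds), 1)

def score_files(pred_path, ref_path):
    preds = [x.strip() for x in open(pred_path).readlines()]
    refs = [x.strip() for x in open(ref_path).readlines()][: len(preds)]
    return {"exact_match": exact_match(preds, refs)}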
'''simple docstring''' from __future__ import annotations def SCREAMING_SNAKE_CASE( __lowercase ) -> bool: if len(__lowercase ) < 2: raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' ) if any(i <= 0 for i in nums ): raise ValueError('''All values must be greater than 0''' ) A: Any = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
319
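The polygon check above implements the generalized polygon inequality: side lengths can close into a polygon only if the longest side is strictly shorter than the sum of the rest. A quick standalone illustration:

def can_form_polygon(sides):
    ordered = sorted(sides)
    return ordered[-1] < sum(ordered[:-1])

assert can_form_polygon([3, 4, 5])       # a valid triangle
assert not can_form_polygon([1, 2, 10])  # 10 >= 1 + 2, so it cannot close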
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase , __lowercase = 0 ) -> list: A: Dict = length or len(__lowercase ) A: Dict = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: A , A: Tuple = list_data[i + 1], list_data[i] A: Union[str, Any] = True return list_data if not swapped else bubble_sort(__lowercase , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
319
1
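The bubble sort above recurses with a shrinking length because each full pass bubbles the largest remaining element into place, and it stops early once a pass makes no swaps. The same logic with the obfuscated names spelled out:

def bubble_sort(list_data, length=0):
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # swap out-of-order neighbors
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)

assert bubble_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]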
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = Dict[str, Any] UpperCamelCase = List[Prediction] @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Any = {} if "threshold" in kwargs: A: List[Any] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: int = load_image(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = torch.IntTensor([[image.height, image.width]] ) A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) A: Any = target_size return inputs def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Tuple = model_inputs.pop('''target_size''' ) A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ ) A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: A: Dict = model_inputs['''bbox'''] return model_outputs def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A , A: Union[str, Any] = target_size[0].tolist() def unnormalize(SCREAMING_SNAKE_CASE_ : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] A: Dict = ['''score''', '''label''', '''box'''] A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = raw_annotations[0] A: List[Any] = raw_annotation['''scores'''] A: List[Any] = raw_annotation['''labels'''] A: int = raw_annotation['''boxes'''] A: Any = scores.tolist() A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels] A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A: Tuple = ['''score''', '''label''', '''box'''] A: str = [ dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) A , A , A , A: str = box.int().tolist() A: str = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
319
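In the LayoutLM branch of the pipeline above, word boxes arrive normalized to a 0-1000 grid, and the inner unnormalize helper rescales them to image pixels. The same arithmetic in isolation:

def unnormalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return {
        "xmin": int(width * x0 / 1000),
        "ymin": int(height * y0 / 1000),
        "xmax": int(width * x1 / 1000),
        "ymax": int(height * y1 / 1000),
    }

assert unnormalize_box((0, 0, 500, 1000), width=800, height=600) == {
    "xmin": 0, "ymin": 0, "xmax": 400, "ymax": 600,
}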
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: List[Any] = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]: A: Tuple = OrderedDict() A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A: Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: int = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[int] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 5_1_2} A: List[str] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: List[str] = {'''visual_embedding_dim''': 2_0_4_8} A: Optional[int] = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: A: Optional[int] = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: str = '''nlvr''' A: Union[str, Any] = VisualBertConfig(**__lowercase ) # Load State Dict A: Union[str, Any] = load_state_dict(__lowercase ) A: str = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: Optional[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Any = VisualBertForMultipleChoice(__lowercase ) 
model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
319
1
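The converter's central move is a sequence of prefix substitutions over state-dict keys, driven by the rename pairs defined at the top of the sample. A compact standalone sketch of that loop (only two of the pairs shown):

RENAME_PAIRS = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]

def rename_key(key, pairs=RENAME_PAIRS):
    for old, new in pairs:
        key = key.replace(old, new)
    return key

assert rename_key("bert.bert.encoder.layer.0.weight") == "visual_bert.encoder.layer.0.weight"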
'''simple docstring''' from functools import lru_cache @lru_cache def SCREAMING_SNAKE_CASE( __lowercase ) -> int: if num < 0: raise ValueError('''Number should not be negative.''' ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
319
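The factorial above leans on functools.lru_cache, so every intermediate value computed during one call is reused by later calls. De-obfuscated, the same function reads:

from functools import lru_cache

@lru_cache  # bare decorator form requires Python >= 3.8, as in the sample
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)

assert factorial(6) == 720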
'''simple docstring''' from itertools import permutations def SCREAMING_SNAKE_CASE( __lowercase ) -> bool: if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False A: int = [7, 1_1, 1_3, 1_7] for i, test in enumerate(__lowercase ): if (num[i + 4] * 1_0_0 + num[i + 5] * 1_0 + num[i + 6]) % test != 0: return False return True def SCREAMING_SNAKE_CASE( __lowercase = 1_0 ) -> int: return sum( int(''''''.join(map(__lowercase , __lowercase ) ) ) for num in permutations(range(__lowercase ) ) if is_substring_divisible(__lowercase ) ) if __name__ == "__main__": print(f'{solution() = }')
319
1
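The solver above is Project Euler 43: find 0-9 pandigitals whose overlapping 3-digit substrings are divisible by 2, 3, 5, 7, 11, 13, 17 in turn. 1406357289 is a known member of that set; checking it against the same rules the sample applies:

num = tuple(int(c) for c in "1406357289")
assert num[3] % 2 == 0                      # d2 d3 d4 divisible by 2
assert (num[2] + num[3] + num[4]) % 3 == 0  # d3 d4 d5 divisible by 3
assert num[5] % 5 == 0                      # d4 d5 d6 divisible by 5
for i, p in enumerate((7, 11, 13, 17)):
    # remaining windows: 357 / 7, 572 / 11, 728 / 13, 289 / 17
    assert (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % p == 0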
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> int: if not isinstance(__lowercase , __lowercase ): raise TypeError('''only integers accepted as input''' ) else: A: str = str(abs(__lowercase ) ) A: int = [list(__lowercase ) for char in range(len(__lowercase ) )] for index in range(len(__lowercase ) ): num_transpositions[index].pop(__lowercase ) return max( int(''''''.join(list(__lowercase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
319
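Despite the "transpositions" naming, the snippet above builds one copy of the digit list per position, pops the digit at that position from its own copy, and returns the largest number that remains, i.e. the best result of deleting a single digit. The same idea, readably:

def max_after_removing_one_digit(num: int) -> int:
    digits = str(abs(num))
    return max(int(digits[:i] + digits[i + 1:]) for i in range(len(digits)))

assert max_after_removing_one_digit(382) == 82  # candidates: 82, 32, 38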
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCamelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCamelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE( ) -> Dict: A: Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) A: Union[str, Any] = bs[:] A: List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 A: List[Any] = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[Any] = set() A: Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A: List[Any] = char return pairs class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : 
Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: str = json.load(SCREAMING_SNAKE_CASE_ ) A: str = {v: k for k, v in self.encoder.items()} A: Union[str, Any] = errors # how to handle errors in decoding A: Optional[int] = bytes_to_unicode() A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: A: int = merges_handle.read().split('''\n''' )[1:-1] A: str = [tuple(merge.split() ) for merge in bpe_merges] A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = {} A: Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] A: str = tuple(SCREAMING_SNAKE_CASE_ ) A: str = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break A , A: Optional[Any] = bigram A: Tuple = [] A: List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: A: 
Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A: int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ ) A: Any = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: str = word return word def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): A: Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ ) A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) A: int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) A: Any = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) A: Union[str, Any] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: int = [self.cls_token_id] A: str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , 
SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Dict = [self.sep_token_id] A: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): A: List[Any] = ''' ''' + text return (text, kwargs)
319
1
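The BPE loop in the tokenizer above repeatedly merges the adjacent symbol pair with the best (lowest) merge rank until no ranked pair remains. Its pair-extraction helper, standalone:

def get_pairs(word):
    pairs = set()
    prev = word[0]
    for char in word[1:]:
        pairs.add((prev, char))
        prev = char
    return pairs

assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}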
'''simple docstring''' # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. UpperCamelCase = abspath(join(dirname(__file__), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: from transformers.testing_utils import pytest_terminal_summary_main A: Optional[int] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(__lowercase , id=__lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: A: Tuple = 0 # Doctest custom flag to ignore output. UpperCamelCase = doctest.register_optionflag('''IGNORE_RESULT''') UpperCamelCase = doctest.OutputChecker class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str: '''simple docstring''' if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCamelCase = CustomOutputChecker UpperCamelCase = HfDoctestModule UpperCamelCase = HfDocTestParser
319
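The conftest above registers a custom doctest option flag so individual examples can skip output comparison entirely. The mechanism in isolation; both register_optionflag and OutputChecker are standard-library doctest APIs:

import doctest

IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

class IgnoringChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True  # the flag short-circuits comparison: any output passes
        return super().check_output(want, got, optionflags)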
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> int: if not isinstance(__lowercase , __lowercase ): raise TypeError('''only integers accepted as input''' ) else: A: str = str(abs(__lowercase ) ) A: int = [list(__lowercase ) for char in range(len(__lowercase ) )] for index in range(len(__lowercase ) ): num_transpositions[index].pop(__lowercase ) return max( int(''''''.join(list(__lowercase ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
319
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = """timm_backbone""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> List[str]: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) A: str = backbone A: Optional[int] = num_channels A: List[Any] = features_only A: int = use_pretrained_backbone A: Union[str, Any] = True A: Any = out_indices if out_indices is not None else (-1,)
319
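The backbone config above falls back to the last feature map when no out_indices are given. A hedged sketch of that defaulting pattern; the class name here is illustrative, not the library's public API:

class BackboneConfigSketch:
    def __init__(self, backbone=None, num_channels=3, out_indices=None):
        self.backbone = backbone
        self.num_channels = num_channels
        # default to the final feature map, as in the sample
        self.out_indices = out_indices if out_indices is not None else (-1,)

assert BackboneConfigSketch().out_indices == (-1,)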
'''simple docstring''' from __future__ import annotations import math def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) A: str = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]: if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) A: Union[str, Any] = len(__lowercase ) A: str = matrix_length // 2 A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )] A: Optional[Any] = [ [a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase ) ] A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )] A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )] return top_left, top_right, bot_left, bot_right def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]: return len(__lowercase ), len(matrix[0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> None: print('''\n'''.join(str(__lowercase ) for line in matrix ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase ) == (2, 2): return default_matrix_multiplication(__lowercase , __lowercase ) A , A , A , A: Union[str, Any] = split_matrix(__lowercase ) A , A , A , A: List[Any] = split_matrix(__lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) A: Any = matrix_addition(__lowercase , __lowercase ) A: List[Any] = matrix_addition(__lowercase , __lowercase ) A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) # construct the new matrix from our 4 quadrants A: Union[str, Any] = [] for i in range(len(__lowercase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__lowercase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase )[1] != 
matrix_dimensions(__lowercase )[0]: A: int = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(__lowercase ) A: str = matrix_dimensions(__lowercase ) A: str = matrix_dimensions(__lowercase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] A: Union[str, Any] = max(*__lowercase , *__lowercase ) A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) ) A: List[Any] = matrixa A: Tuple = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) A: Any = actual_strassen(__lowercase , __lowercase ) # Removing the additional zeros for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": UpperCamelCase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
319
1
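Strassen's recursion above splits matrices into quadrants, so the driver zero-pads both inputs up to the next power-of-two size before recursing and trims the padding afterwards. The padding-size rule it relies on, standalone:

import math

def next_power_of_two(n: int) -> int:
    return int(math.pow(2, math.ceil(math.log2(n))))

assert next_power_of_two(5) == 8
assert next_power_of_two(8) == 8  # already a power of two, left unchanged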
'''simple docstring''' from collections.abc import Iterable from typing import Generic, TypeVar UpperCamelCase = TypeVar('''_T''') class lowerCAmelCase_ ( Generic[_T] ): '''simple docstring''' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Iterable[_T] | None = None ) -> None: '''simple docstring''' A: list[_T] = list(iterable or [] ) A: list[_T] = [] def __len__( self : List[str] ) -> int: '''simple docstring''' return len(self._stacka ) + len(self._stacka ) def __repr__( self : int ) -> str: '''simple docstring''' return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : _T ) -> None: '''simple docstring''' self._stacka.append(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> _T: '''simple docstring''' A: str = self._stacka.pop A: Optional[Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('''Queue is empty''' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
319
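The queue above only drains its inbox stack into the outbox stack when the outbox runs dry, which is what makes dequeue amortized O(1): each element is moved between stacks at most once. The core trick in four lines:

inbox, outbox = [1, 2, 3], []
if not outbox:
    while inbox:
        outbox.append(inbox.pop())
assert outbox.pop() == 1  # FIFO order recovered from two LIFO stacks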
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = 2 @register_to_config def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = sigma_max # setable values A: int = None A: np.IntTensor = None A: torch.FloatTensor = None # sigma(t_i) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' A: List[Any] = num_inference_steps A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) A: str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A: List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) A: Optional[Any] = sigma + gamma * sigma A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' A: Union[str, Any] = sample_hat + sigma_hat * model_output A: str = (sample_hat - pred_original_sample) / sigma_hat A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple 
docstring''' A: int = sample_prev + sigma_prev * model_output A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' raise NotImplementedError()
319
1
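The scheduler's set_timesteps above builds a noise schedule that interpolates geometrically between sigma_max**2 and sigma_min**2. The same formula standalone, using the defaults visible in the sample's signature (sigma_min=0.02, sigma_max=80):

def geometric_noise_levels(sigma_min, sigma_max, num_steps):
    # exponent runs from 0 (pure sigma_max**2) to 1 (pure sigma_min**2)
    return [
        sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))
        for i in range(num_steps)
    ]

levels = geometric_noise_levels(0.02, 80.0, num_steps=5)
assert abs(levels[0] - 80.0**2) < 1e-9
assert abs(levels[-1] - 0.02**2) < 1e-9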
'''simple docstring''' from __future__ import annotations from collections.abc import Callable UpperCamelCase = list[list[float | int]] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Matrix: A: int = len(__lowercase ) A: Matrix = [[0 for _ in range(size + 1 )] for _ in range(__lowercase )] A: int A: int A: int A: int A: int A: float for row in range(__lowercase ): for col in range(__lowercase ): A: Optional[int] = matrix[row][col] A: Any = vector[row][0] A: Any = 0 A: int = 0 while row < size and col < size: # pivoting A: str = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__lowercase , __lowercase ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: A , A: Dict = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , __lowercase ): A: Optional[Any] = augmented[rowa][col] / augmented[row][col] A: Tuple = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , __lowercase ): for row in range(__lowercase ): A: Tuple = augmented[row][col] / augmented[col][col] for cola in range(__lowercase , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 1_0 )] for row in range(__lowercase ) ] def SCREAMING_SNAKE_CASE( __lowercase ) -> Callable[[int], int]: A: int = len(__lowercase ) A: Matrix = [[0 for _ in range(__lowercase )] for _ in range(__lowercase )] A: Matrix = [[0] for _ in range(__lowercase )] A: Matrix A: int A: int A: int for x_val, y_val in enumerate(__lowercase ): for col in range(__lowercase ): A: List[Any] = (x_val + 1) ** (size - col - 1) A: Tuple = y_val A: Dict = solve(__lowercase , __lowercase ) def interpolated_func(__lowercase ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(__lowercase ) ) return interpolated_func def SCREAMING_SNAKE_CASE( __lowercase ) -> int: return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**1_0 ) def SCREAMING_SNAKE_CASE( __lowercase = question_function , __lowercase = 1_0 ) -> int: A: list[int] = [func(__lowercase ) for x_val in range(1 , order + 1 )] A: list[Callable[[int], int]] = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] A: int = 0 A: Callable[[int], int] A: int for poly in polynomials: A: int = 1 while func(__lowercase ) == poly(__lowercase ): x_val += 1 ret += poly(__lowercase ) return ret if __name__ == "__main__": print(f'{solution() = }')
319
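The Project Euler 101 solver above fits a degree-(k-1) polynomial through the first k sequence values via Gaussian elimination, then sums each fit's first incorrect term (FIT). A tiny worked instance of that idea: for u(n) = n**3, the line through (1, 1) and (2, 8) is 7n - 6, so its FIT is 7*3 - 6 = 15 rather than the true 27:

line = lambda n: 7 * n - 6  # interpolates (1, 1) and (2, 8) exactly
assert (line(1), line(2), line(3)) == (1, 8, 15)
assert 3**3 == 27  # the true third term, so the FIT of this fit is 15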
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) UpperCamelCase_ : Optional[str] = field( 
default=UpperCAmelCase_ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) UpperCamelCase_ : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) UpperCamelCase_ : Optional[int] = field( default=UpperCAmelCase_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) UpperCamelCase_ : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' if self.train_file is not None: A: Tuple = self.train_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: A: str = self.validation_file.split('''.''' )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> List[str]: with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f: A: List[Any] = [json.loads(__lowercase ) for line in f.read().splitlines() if (len(__lowercase ) > 0 and not line.isspace())] assert len(__lowercase ) == len(__lowercase ) A: Optional[int] = {c: dataset[c] for c in dataset.column_names} A: Union[str, Any] = refs return Dataset.from_dict(__lowercase ) def SCREAMING_SNAKE_CASE( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. A: int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A , A , A: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A , A , A: List[Any] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. A: Any = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A: Any = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __lowercase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. A: Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): A: int = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) A: Dict = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: A: Any = {} if data_args.train_file is not None: A: int = data_args.train_file if data_args.validation_file is not None: A: Optional[int] = data_args.validation_file A: List[str] = data_args.train_file.split('''.''' )[-1] if extension == "txt": A: int = '''text''' A: Any = load_dataset(__lowercase , data_files=__lowercase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
A: Dict = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name: A: List[Any] = AutoConfig.from_pretrained(model_args.config_name , **__lowercase ) elif model_args.model_name_or_path: A: int = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: A: str = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) A: Tuple = { '''cache_dir''': model_args.cache_dir, '''use_fast''': model_args.use_fast_tokenizer, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.tokenizer_name: A: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowercase ) elif model_args.model_name_or_path: A: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowercase ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported by this script.''' '''You can do it from another script, save it, and load it from here, using --tokenizer_name.''' ) if model_args.model_name_or_path: A: List[Any] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) A: List[Any] = AutoModelForMaskedLM.from_config(__lowercase ) model.resize_token_embeddings(len(__lowercase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: A: int = datasets['''train'''].column_names else: A: str = datasets['''validation'''].column_names A: Tuple = '''text''' if '''text''' in column_names else column_names[0] A: List[str] = '''max_length''' if data_args.pad_to_max_length else False def tokenize_function(__lowercase ): # Remove empty lines A: int = [line for line in examples['''text'''] if len(__lowercase ) > 0 and not line.isspace()] return tokenizer(examples['''text'''] , padding=__lowercase , truncation=__lowercase , max_length=data_args.max_seq_length ) A: str = datasets.map( __lowercase , batched=__lowercase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: A: List[str] = add_chinese_references(tokenized_datasets['''train'''] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: A: Dict = add_chinese_references( tokenized_datasets['''validation'''] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer A: Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file if has_ref: A: List[Any] = False # Data collator # This one will take care of randomly masking the tokens. 
A: Optional[Any] = DataCollatorForWholeWordMask(tokenizer=__lowercase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A: Optional[int] = Trainer( model=__lowercase , args=__lowercase , train_dataset=tokenized_datasets['''train'''] if training_args.do_train else None , eval_dataset=tokenized_datasets['''validation'''] if training_args.do_eval else None , tokenizer=__lowercase , data_collator=__lowercase , ) # Training if training_args.do_train: if last_checkpoint is not None: A: Optional[int] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): A: str = model_args.model_name_or_path else: A: List[str] = None A: str = trainer.train(resume_from_checkpoint=__lowercase ) trainer.save_model() # Saves the tokenizer too for easy upload A: Union[str, Any] = os.path.join(training_args.output_dir , '''train_results.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Train results *****''' ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) ) # Evaluation A: Optional[int] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) A: Optional[Any] = trainer.evaluate() A: Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) A: Dict = perplexity A: Any = os.path.join(training_args.output_dir , '''eval_results_mlm_wwm.txt''' ) if trainer.is_world_process_zero(): with open(__lowercase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
319
1
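One step in the training script above merges per-line whole-word-mask references into the dataset as an extra column before training. The same merge in isolation; Dataset.from_dict is the real datasets API, and the "chinese_ref" column name follows the script's Chinese whole-word-masking use case:

from datasets import Dataset

base = Dataset.from_dict({"text": ["abc", "def"]})
refs = [[1], [2]]  # one reference list per example, same length as the dataset
merged = Dataset.from_dict({**{c: base[c] for c in base.column_names}, "chinese_ref": refs})
assert merged.column_names == ["text", "chinese_ref"]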
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = XLMProphetNetTokenizer UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : List[Any] = True def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A: Union[str, Any] = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self : Any ) -> str: '''simple docstring''' A: Any = '''[PAD]''' A: Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] ) -> Dict: '''simple docstring''' A: Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''[PAD]''' ) self.assertEqual(vocab_keys[1] , '''[CLS]''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 10_12 ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_12 ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[int] = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) A: str = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A: Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A: str = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) A: List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''[UNK]''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''[UNK]''', '''.''', ] , ) @cached_property def _snake_case ( self : Any ) -> Any: '''simple docstring''' return 
XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' ) @slow def _snake_case ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' A: Union[str, Any] = '''Hello World!''' A: List[Any] = [3_53_89, 66_72, 49, 2] self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def _snake_case ( self : Optional[Any] ) -> str: '''simple docstring''' A: Optional[int] = {'''input_ids''': [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
319
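The integration assertions above add tokenizer.fairseq_offset to raw SentencePiece ids: the offset reserves the lowest ids for special tokens, and the -9 sentinel lands on the UNK id after the shift. A sketch of that arithmetic, assuming an illustrative offset value (not the tokenizer's real attribute):

FAIRSEQ_OFFSET = 12  # assumed value, purely illustrative

def to_model_ids(sp_ids):
    # model id = raw SentencePiece id + fixed offset
    return [i + FAIRSEQ_OFFSET for i in sp_ids]

print(to_model_ids([285, 46, 10, 170, 382]))  # ids for '▁This ▁is ▁a ▁t est'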
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
319
1
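The offset tests above read fields out of output_char_offsets dicts via the get_from_offsets helper. A small sketch of consuming such offsets outside the test harness (field names match the tests; the values are made up):

char_offsets = [
    {'char': 'k', 'start_offset': 0, 'end_offset': 1},
    {'char': 's', 'start_offset': 1, 'end_offset': 4},
    {'char': 'ɾ', 'start_offset': 4, 'end_offset': 6},
]
text = ' '.join(d['char'] for d in char_offsets)    # 'k s ɾ'
starts = [d['start_offset'] for d in char_offsets]  # [0, 1, 4]
ends = [d['end_offset'] for d in char_offsets]      # [1, 4, 6]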
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = LongformerTokenizer UpperCamelCase_ : List[str] = True UpperCamelCase_ : Any = LongformerTokenizerFast UpperCamelCase_ : Dict = True def _snake_case ( self : Optional[Any] ) -> str: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A: Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] A: Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: str = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A: str = {'''unk_token''': '''<unk>'''} A: Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) ) def _snake_case ( self : List[str] , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : str ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]: '''simple docstring''' A: str = '''lower newer''' A: int = '''lower newer''' return input_text, output_text def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' A: Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A: List[str] = '''lower newer''' A: int = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] A: Any = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) # , add_prefix_space=True) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokens + [tokenizer.unk_token] A: Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : str ) -> int: '''simple docstring''' A: Optional[Any] = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _snake_case ( self : Any ) -> str: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) A: Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: int = tokenizer.encode( '''sequence builders''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' A: Any = self.get_tokenizer() A: Tuple = '''Encode this sequence.''' A: List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: str = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) A: Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Testing spaces after special tokens A: Optional[int] = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ )} ) # mask token has a left space A: Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = '''Encode <mask> sequence''' A: List[Any] = '''Encode <mask>sequence''' A: Dict = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = encoded.index(SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) A: Tuple = encoded.index(SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Tuple: '''simple docstring''' pass def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A: int = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , 
**SCREAMING_SNAKE_CASE_ ) A: Optional[int] = '''A, <mask> AllenNLP sentence.''' A: Any = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) A: Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) A: Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _snake_case ( self : Tuple ) -> str: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A: List[Any] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A: Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state['''add_prefix_space'''] , SCREAMING_SNAKE_CASE_ ) self.assertEqual(post_processor_state['''trim_offsets'''] , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> str: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): A: Tuple = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` A: Dict = f"""{text_of_1_token} {text_of_1_token}""" A: Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: Dict = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: Dict = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ 
) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ) + 1, len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: int = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: int = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE_ ), len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: Dict = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A: Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ) + 1, 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , ) A: Optional[Any] = self.rust_tokenizer_class.from_pretrained( SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ ) A: List[str] = tokenizer_r(SCREAMING_SNAKE_CASE_ , return_offsets_mapping=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(SCREAMING_SNAKE_CASE_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE_ ), 1 + len(SCREAMING_SNAKE_CASE_ ) + 1 + len(SCREAMING_SNAKE_CASE_ )) , )
319
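The trim_offsets cases above boil down to whether a token's offset span includes the space in front of it. The same invariant, checked on a plain string with no tokenizer involved:

text_of_1_token = 'hello'
text = f'{text_of_1_token} {text_of_1_token}'
trimmed = (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token))
untrimmed = (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token))
assert text[slice(*trimmed)] == 'hello'     # trim_offsets=True: leading space excluded
assert text[slice(*untrimmed)] == ' hello'  # trim_offsets=False: leading space included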
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


UpperCamelCase = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use BeitImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
319
1
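The file above follows the standard deprecation-shim pattern. A usage sketch showing the FutureWarning it emits (assumes the class is importable from an installed transformers):

import warnings

from transformers import BeitFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    extractor = BeitFeatureExtractor()  # construction triggers the deprecation warning
    assert any(issubclass(w.category, FutureWarning) for w in caught)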
'''simple docstring''' import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 UpperCamelCase = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''') UpperCamelCase = get_tests_dir('''fixtures/vocab.json''') UpperCamelCase = get_tests_dir('''fixtures''') class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] def _snake_case ( self : str ) -> Optional[Any]: '''simple docstring''' A: Union[str, Any] = 0 def _snake_case ( self : Dict ) -> Dict: '''simple docstring''' A: Optional[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A: List[Any] = WavaVecaConfig() A: List[Any] = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Dict = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[Any] ) -> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) copyfile(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.json''' ) ) A: str = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Tuple ) -> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A: Dict = WavaVecaFeatureExtractor() A: List[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A: str = WavaVecaProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) # drop `processor_class` in tokenizer with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''r''' ) as f: A: Dict = json.load(SCREAMING_SNAKE_CASE_ ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) ) A: List[str] = 
AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> str: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A: str = WavaVecaFeatureExtractor() A: str = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) A: int = WavaVecaProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # save in new folder processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) # drop `processor_class` in feature extractor with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''r''' ) as f: A: Tuple = json.load(SCREAMING_SNAKE_CASE_ ) config_dict.pop('''processor_class''' ) with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''w''' ) as f: f.write(json.dumps(SCREAMING_SNAKE_CASE_ ) ) A: Optional[int] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A: Dict = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) # copy relevant files copyfile(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''w''' ) as f: f.write('''{}''' ) A: Optional[int] = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Any ) -> Dict: '''simple docstring''' with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: Optional[int] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(SCREAMING_SNAKE_CASE_ ): A: int = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) A: int = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) A: Any = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version A: List[str] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ , use_fast=SCREAMING_SNAKE_CASE_ ) A: Union[str, Any] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def _snake_case ( self : int ) -> Tuple: '''simple docstring''' try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) AutoProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE_ ): AutoProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Now that the config is registered, it can be used as any other config with the auto-API A: Union[str, Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: A: Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A: Any = CustomTokenizer(SCREAMING_SNAKE_CASE_ ) A: int = CustomProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) A: Dict = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self : Dict ) -> Tuple: '''simple docstring''' class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = False class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : List[Any] = False class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : str = """AutoFeatureExtractor""" UpperCamelCase_ : Tuple = """AutoTokenizer""" UpperCamelCase_ : Optional[Any] = 
False try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE_ ) AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) AutoTokenizer.register(SCREAMING_SNAKE_CASE_ , slow_tokenizer_class=SCREAMING_SNAKE_CASE_ ) AutoProcessor.register(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # If remote code is not set, the default is to use local classes. A: Dict = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. A: Optional[int] = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. A: Any = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def _snake_case ( self : Optional[Any] ) -> Any: '''simple docstring''' A: Dict = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def _snake_case ( self : Any ) -> Any: '''simple docstring''' A: List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def _snake_case ( cls : Tuple ) -> Any: '''simple docstring''' A: Optional[int] = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE_ ) @classmethod def _snake_case ( cls : str ) -> str: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def _snake_case ( self : Dict ) -> Tuple: '''simple docstring''' A: int = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE_ , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE_ , 
use_auth_token=self._token ) A: Dict = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _snake_case ( self : Union[str, Any] ) -> int: '''simple docstring''' A: Optional[Any] = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(SCREAMING_SNAKE_CASE_ , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token , organization='''valid_org''' , ) A: Optional[Any] = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE_ ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def _snake_case ( self : List[str] ) -> Tuple: '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A: Union[str, Any] = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: A: List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , '''vocab.txt''' ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) A: int = CustomTokenizer(SCREAMING_SNAKE_CASE_ ) A: List[Any] = CustomProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token ) A: List[Any] = Repository(SCREAMING_SNAKE_CASE_ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(SCREAMING_SNAKE_CASE_ , '''tokenizer_config.json''' ) ) as f: A: str = json.load(SCREAMING_SNAKE_CASE_ ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , '''custom_processing.py''' ) ) ) repo.push_to_hub() A: Optional[Any] = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=SCREAMING_SNAKE_CASE_ ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
319
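The registration tests above guard the global auto-mappings with try/finally so a failing test cannot leak entries into them. The bare pattern, using the same fixtures the tests import:

from transformers import CONFIG_MAPPING, AutoConfig
from test_module.custom_configuration import CustomConfig  # fixture module from the tests

try:
    AutoConfig.register('custom', CustomConfig)
    # ... exercise AutoProcessor / AutoTokenizer against the registered config ...
finally:
    # same cleanup the tests perform via `del CONFIG_MAPPING._extra_content["custom"]`
    CONFIG_MAPPING._extra_content.pop('custom', None)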
'''simple docstring'''
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = '''
import os
'''

IMPORT_IN_FUNCTION = '''
def foo():
    import os
    return False
'''

DEEPLY_NESTED_IMPORT = '''
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
'''

TOP_LEVEL_TRY_IMPORT = '''
import os

try:
    import bar
except ImportError:
    raise ValueError()
'''

TRY_IMPORT_IN_FUNCTION = '''
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
'''

MULTIPLE_EXCEPTS_IMPORT = '''
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
'''

EXCEPT_AS_IMPORT = '''
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
'''

GENERIC_EXCEPT_IMPORT = '''
import os

try:
    import bar
except:
    raise ValueError()
'''

MULTILINE_TRY_IMPORT = '''
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
'''

MULTILINE_BOTH_IMPORT = '''
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
'''

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize('''case''', CASES)
def test_import_parsing(tmp_path, case):
    A: Tuple = os.path.join(tmp_path, '''test_file.py''')
    with open(A, '''w''') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(A)
    assert parsed_imports == ["os"]
319
1
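A direct usage sketch of get_imports, matching what the parametrized cases above assert: a top-level import is reported, while an import guarded by try/except is skipped (the file name here is ours):

import textwrap

from transformers.dynamic_module_utils import get_imports

with open('example_module.py', 'w') as f:
    f.write(textwrap.dedent('''\
        import os

        try:
            import bar
        except ImportError:
            raise ValueError()
    '''))
print(get_imports('example_module.py'))  # -> ['os']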
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(R'''([A-Z]+)([A-Z][a-z])''')
_lowercase_uppercase_re = re.compile(R'''([a-z\d])([A-Z])''')

_single_underscore_re = re.compile(R'''(?<!_)_(?!_)''')
_multiple_underscores_re = re.compile(R'''(_{2,})''')

_split_re = R'''^\w+(\.\w+)*$'''

INVALID_WINDOWS_CHARACTERS_IN_PATH = R'''<>:/\|?*'''


def camelcase_to_snakecase(name) -> Dict:
    A: str = _uppercase_uppercase_re.sub(r'''\1_\2''', name)
    A: int = _lowercase_uppercase_re.sub(r'''\1_\2''', A)
    return A.lower()


def snakecase_to_camelcase(name) -> int:
    A: str = _single_underscore_re.split(name)
    A: Optional[int] = [_multiple_underscores_re.split(n) for n in A]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(A) if n != '''''')


def filename_prefix_for_name(name) -> List[str]:
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split) -> str:
    if os.path.basename(name) != name:
        raise ValueError(F"""Should be a dataset name, not a path: {name}""")
    if not re.match(_split_re, split):
        raise ValueError(F"""Split name should match '{_split_re}' but got '{split}'.""")
    return F"""{filename_prefix_for_name(name)}-{split}"""


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None) -> List[str]:
    A: Any = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        A += F""".{filetype_suffix}"""
    A: Union[str, Any] = os.path.join(data_dir, A)
    return F"""{A}*"""


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None) -> Optional[Any]:
    A: int = filename_prefix_for_split(dataset_name, split)
    A: List[Any] = os.path.join(path, A)
    if shard_lengths:
        A: int = len(shard_lengths)
        A: List[str] = [F"""{A}-{shard_id:05d}-of-{A:05d}""" for shard_id in range(A)]
        if filetype_suffix:
            A: int = [filename + F""".{filetype_suffix}""" for filename in A]
        return A
    else:
        A: Optional[Any] = A
        if filetype_suffix:
            A += F""".{filetype_suffix}"""
        return [A]
319
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase=False , __lowercase=False , __lowercase=False ) -> Optional[Any]: A: str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''), ( '''text_embeddings.position_embeddings.weight''', '''vilt.embeddings.text_embeddings.position_embeddings.weight''', ), ('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''), ( '''text_embeddings.token_type_embeddings.weight''', '''vilt.embeddings.text_embeddings.token_type_embeddings.weight''', ), ('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''), ('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''), # patch embeddings ('''transformer.cls_token''', '''vilt.embeddings.cls_token'''), ('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''), ('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''), ('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''), # token type embeddings ('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''), ] ) # final layernorm + pooler rename_keys.extend( [ ('''transformer.norm.weight''', '''vilt.layernorm.weight'''), ('''transformer.norm.bias''', '''vilt.layernorm.bias'''), ('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''), ('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('''vqa_classifier.0.weight''', '''classifier.0.weight'''), 
('''vqa_classifier.0.bias''', '''classifier.0.bias'''), ('''vqa_classifier.1.weight''', '''classifier.1.weight'''), ('''vqa_classifier.1.bias''', '''classifier.1.bias'''), ('''vqa_classifier.3.weight''', '''classifier.3.weight'''), ('''vqa_classifier.3.bias''', '''classifier.3.bias'''), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''), ('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''), ('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''), ('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''), ('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''), ('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''), ] ) else: pass return rename_keys def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Any: for i in range(config.num_hidden_layers ): A: Tuple = '''vilt.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A: List[str] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" ) A: Optional[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict A: Dict = in_proj_weight[ : config.hidden_size, : ] A: int = in_proj_bias[: config.hidden_size] A: Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A: int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A: Optional[int] = in_proj_weight[ -config.hidden_size :, : ] A: Optional[Any] = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE( __lowercase ) -> int: A: Optional[int] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: List[Any] = dct.pop(__lowercase ) A: int = val @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> str: A: Optional[Any] = ViltConfig(image_size=3_8_4 , patch_size=3_2 , tie_word_embeddings=__lowercase ) A: Tuple = False A: str = False A: List[Any] = False A: Optional[int] = False if "vqa" in checkpoint_url: A: Union[str, Any] = True A: Union[str, Any] = 3_1_2_9 A: List[Any] = '''huggingface/label-files''' A: Any = '''vqa2-id2label.json''' A: Optional[Any] = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()} A: Any = idalabel A: Optional[Any] = {v: k for k, v in idalabel.items()} A: List[str] = ViltForQuestionAnswering(__lowercase ) elif "nlvr" in checkpoint_url: A: Dict = True A: str = 2 A: Union[str, Any] = {0: '''False''', 1: '''True'''} A: Any = {v: k for k, v in config.idalabel.items()} A: Optional[Any] = 3 A: Any = ViltForImagesAndTextClassification(__lowercase ) elif "irtr" in checkpoint_url: A: Tuple = True A: Optional[Any] = ViltForImageAndTextRetrieval(__lowercase ) elif "mlm_itm" in checkpoint_url: A: Tuple = True A: Optional[int] = ViltForMaskedLM(__lowercase ) else: raise ValueError('''Unknown model type''' ) # load state_dict of original model, remove and rename some keys A: int = torch.hub.load_state_dict_from_url(__lowercase , map_location='''cpu''' )['''state_dict'''] A: List[str] = create_rename_keys(__lowercase , __lowercase , __lowercase , __lowercase ) for src, dest in rename_keys: rename_key(__lowercase , __lowercase , __lowercase ) read_in_q_k_v(__lowercase , __lowercase ) if mlm_model or irtr_model: A: str = ['''itm_score.fc.weight''', '''itm_score.fc.bias'''] 
for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) # load state dict into HuggingFace model model.eval() if mlm_model: A , A: Union[str, Any] = model.load_state_dict(__lowercase , strict=__lowercase ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(__lowercase ) # Define processor A: Optional[Any] = ViltImageProcessor(size=3_8_4 ) A: Dict = BertTokenizer.from_pretrained('''bert-base-uncased''' ) A: Optional[int] = ViltProcessor(__lowercase , __lowercase ) # Forward pass on example inputs (image + text) if nlvr_model: A: str = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: List[str] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__lowercase ).raw ) A: Any = ( '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are''' ''' standing.''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: List[str] = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: A: Any = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__lowercase ).raw ) if mlm_model: A: Optional[int] = '''a bunch of [MASK] laying on a [MASK].''' else: A: Optional[int] = '''How many cats are there?''' A: Union[str, Any] = processor(__lowercase , __lowercase , return_tensors='''pt''' ) A: Any = model(**__lowercase ) # Verify outputs if mlm_model: A: Any = torch.Size([1, 1_1, 3_0_5_2_2] ) A: Tuple = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify masked token prediction equals "cats" A: List[str] = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: A: Any = torch.Size([1, 3_1_2_9] ) A: Optional[int] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , __lowercase , atol=1E-4 ) # verify vqa prediction equals "2" A: Dict = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: A: Union[str, Any] = torch.Size([1, 2] ) A: Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] ) assert torch.allclose(outputs.logits[0, :3] , __lowercase , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(F"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
319
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = '''char''' __snake_case = '''bpe''' __snake_case = '''wp''' UpperCAmelCase__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = ['''image_processor''', '''char_tokenizer'''] __snake_case = '''ViTImageProcessor''' __snake_case = '''MgpstrTokenizer''' def __init__( self : Optional[int] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Optional[Any] ) ->Tuple: """simple docstring""" a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __UpperCAmelCase , ) a = kwargs.pop('''feature_extractor''' ) a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) a = tokenizer a = AutoTokenizer.from_pretrained('''gpt2''' ) a = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self : List[str] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : List[str] ) ->List[Any]: """simple docstring""" if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: a = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: a = encodings['''input_ids'''] return inputs def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[Any] ) ->List[Any]: """simple docstring""" a , a , a = sequences a = char_preds.size(0 ) a , a = self._decode_helper(__UpperCAmelCase , '''char''' ) a , a = self._decode_helper(__UpperCAmelCase , '''bpe''' ) a , a = self._decode_helper(__UpperCAmelCase , '''wp''' ) a = [] a = [] for i in range(__UpperCAmelCase ): a = [char_scores[i], bpe_scores[i], wp_scores[i]] a = [char_strs[i], bpe_strs[i], wp_strs[i]] a = scores.index(max(__UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) a = {} a = final_strs a = final_scores a = char_strs a = bpe_strs a = wp_strs return out def __lowerCAmelCase ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ) ->int: """simple docstring""" if format == DecodeType.CHARACTER: a = self.char_decode a = 1 a = '''[s]''' elif format == DecodeType.BPE: a = self.bpe_decode a = 2 a = '''#''' elif format == DecodeType.WORDPIECE: a = self.wp_decode a = 102 a = '''[SEP]''' else: raise ValueError(F"""Format {format} is not supported.""" ) a , a = [], [] a = pred_logits.size(0 ) a = pred_logits.size(1 ) a , a = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase ) a = preds_index.view(-1 , __UpperCAmelCase )[:, 1:] a = decoder(__UpperCAmelCase ) a , a = 
torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 ) a = preds_max_prob[:, 1:] for index in range(__UpperCAmelCase ): a = preds_str[index].find(__UpperCAmelCase ) a = preds_str[index][:pred_eos] a = preds_index[index].cpu().tolist() a = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1 a = preds_max_prob[index][: pred_eos_index + 1] a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(__UpperCAmelCase ) conf_scores.append(__UpperCAmelCase ) return dec_strs, conf_scores def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ) ->Union[str, Any]: """simple docstring""" a = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[Any] ) ->List[Any]: """simple docstring""" return self.bpe_tokenizer.batch_decode(__UpperCAmelCase ) def __lowerCAmelCase ( self : Any , __UpperCAmelCase : List[Any] ) ->List[Any]: """simple docstring""" a = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )] return decode_strs
0
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } UpperCamelCase = { '''b0''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 224, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1280, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 240, '''dropout_rate''': 0.2, '''dw_padding''': [16], }, '''b2''': { '''hidden_dim''': 1408, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 260, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 16], }, '''b3''': { '''hidden_dim''': 1536, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 300, '''dropout_rate''': 0.3, '''dw_padding''': [5, 18], }, '''b4''': { '''hidden_dim''': 1792, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 380, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2048, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 456, '''dropout_rate''': 0.4, '''dw_padding''': [13, 27], }, '''b6''': { '''hidden_dim''': 2304, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 528, '''dropout_rate''': 0.5, '''dw_padding''': [31], }, '''b7''': { '''hidden_dim''': 2560, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 600, '''dropout_rate''': 0.5, '''dw_padding''': [18], }, } def SCREAMING_SNAKE_CASE( __lowercase ) -> Dict: A: Tuple = EfficientNetConfig() A: Optional[int] = CONFIG_MAP[model_name]['''hidden_dim'''] A: Optional[int] = CONFIG_MAP[model_name]['''width_coef'''] A: str = CONFIG_MAP[model_name]['''depth_coef'''] A: Dict = CONFIG_MAP[model_name]['''image_size'''] A: str = CONFIG_MAP[model_name]['''dropout_rate'''] A: Optional[Any] = CONFIG_MAP[model_name]['''dw_padding'''] A: Optional[Any] = '''huggingface/label-files''' A: List[str] = '''imagenet-1k-id2label.json''' A: Dict = 1_0_0_0 A: Any = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type='''dataset''' ) , '''r''' ) ) A: Tuple = {int(__lowercase ): v for k, v in idalabel.items()} A: int = idalabel A: Tuple = {v: k for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE( ) -> Any: A: Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A: Union[str, Any] = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: List[str] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = EfficientNetImageProcessor( size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , ) return preprocessor def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: 
List[str] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )] A: List[str] = sorted(set(__lowercase ) ) A: Dict = len(__lowercase ) A: List[str] = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )} A: Optional[int] = [] rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') ) rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') ) rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') ) rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') ) rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') ) for b in block_names: A: int = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') ) rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') ) rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') ) rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') ) rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') ) A: Union[str, Any] = {} for item in rename_keys: if item[0] in 
original_param_names: A: str = '''efficientnet.''' + item[1] A: int = '''classifier.weight''' A: Tuple = '''classifier.bias''' return key_mapping def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue A: Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: A: List[str] = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A: List[Any] = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A: Optional[Any] = torch.from_numpy(np.transpose(__lowercase ) ) else: A: Any = torch.from_numpy(__lowercase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__lowercase ) @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple: A: Optional[int] = model_classes[model_name]( include_top=__lowercase , weights='''imagenet''' , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=1_0_0_0 , classifier_activation='''softmax''' , ) A: List[str] = original_model.trainable_variables A: Optional[Any] = original_model.non_trainable_variables A: Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A: int = param.numpy() A: Tuple = list(tf_params.keys() ) # Load HuggingFace model A: Dict = get_efficientnet_config(__lowercase ) A: Union[str, Any] = EfficientNetForImageClassification(__lowercase ).eval() A: Dict = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('''Converting parameters...''' ) A: int = rename_keys(__lowercase ) replace_params(__lowercase , __lowercase , __lowercase ) # Initialize preprocessor and preprocess input image A: List[Any] = convert_image_processor(__lowercase ) A: Optional[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' ) # HF model inference hf_model.eval() with torch.no_grad(): A: str = hf_model(**__lowercase ) A: List[Any] = outputs.logits.detach().numpy() # Original model inference A: Any = False A: List[Any] = CONFIG_MAP[model_name]['''image_size'''] A: List[Any] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A: str = image.img_to_array(__lowercase ) A: Dict = np.expand_dims(__lowercase , axis=0 ) A: Any = original_model.predict(__lowercase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__lowercase , __lowercase , atol=1E-3 ), "The predicted logits are not the same." 
print('''Model outputs match!''' ) if save_model: # Create folder to save model if not os.path.isdir(__lowercase ): os.mkdir(__lowercase ) # Save converted model and image processor hf_model.save_pretrained(__lowercase ) preprocessor.save_pretrained(__lowercase ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) A: int = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(__lowercase ) hf_model.push_to_hub(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') UpperCamelCase = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
319
0
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> str: '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = np.max(_outputs , axis=-1 , keepdims=snake_case_ ) UpperCAmelCase_ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case_ ) class __A ( UpperCamelCase__ ): a__ : List[str] = """sigmoid""" a__ : Tuple = """softmax""" a__ : Union[str, Any] = """none""" @add_end_docstrings( UpperCamelCase__ , r""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class __A ( UpperCamelCase__ ): a__ : Optional[Any] = False a__ : List[str] = ClassificationFunction.NONE def __init__(self : Dict , **__a : Any ): super().__init__(**__a ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _lowercase (self : Dict , __a : Dict=None , __a : Any=None , __a : int="" , **__a : Dict ): # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" UpperCAmelCase_ = tokenizer_kwargs UpperCAmelCase_ = {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: UpperCAmelCase_ = self.model.config.return_all_scores if isinstance(__a , __a ) or top_k is None: UpperCAmelCase_ = top_k UpperCAmelCase_ = False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." , __a , ) if return_all_scores: UpperCAmelCase_ = None else: UpperCAmelCase_ = 1 if isinstance(__a , __a ): UpperCAmelCase_ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: UpperCAmelCase_ = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self : Optional[int] , *__a : List[Any] , **__a : Tuple ): UpperCAmelCase_ = super().__call__(*__a , **__a ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
UpperCAmelCase_ = "top_k" not in kwargs if isinstance(args[0] , __a ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _lowercase (self : Optional[Any] , __a : Dict , **__a : List[str] ): UpperCAmelCase_ = self.framework if isinstance(__a , __a ): return self.tokenizer(**__a , return_tensors=__a , **__a ) elif isinstance(__a , __a ) and len(__a ) == 1 and isinstance(inputs[0] , __a ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__a , **__a ) elif isinstance(__a , __a ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." ) return self.tokenizer(__a , return_tensors=__a , **__a ) def _lowercase (self : Dict , __a : Optional[Any] ): return self.model(**__a ) def _lowercase (self : Dict , __a : int , __a : int=None , __a : List[str]=1 , __a : List[str]=True ): # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: UpperCAmelCase_ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: UpperCAmelCase_ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: UpperCAmelCase_ = self.model.config.function_to_apply else: UpperCAmelCase_ = ClassificationFunction.NONE UpperCAmelCase_ = model_outputs["logits"][0] UpperCAmelCase_ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: UpperCAmelCase_ = sigmoid(__a ) elif function_to_apply == ClassificationFunction.SOFTMAX: UpperCAmelCase_ = softmax(__a ) elif function_to_apply == ClassificationFunction.NONE: UpperCAmelCase_ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} UpperCAmelCase_ = [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__a ) ] if not _legacy: dict_scores.sort(key=lambda __a : x["score"] , reverse=__a ) if top_k is not None: UpperCAmelCase_ = dict_scores[:top_k] return dict_scores
1
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
319
0
'''simple docstring''' import operator as op lowerCamelCase : Dict = 'scaler.pt' lowerCamelCase : Optional[Any] = 'pytorch_model' lowerCamelCase : List[Any] = 'random_states' lowerCamelCase : Union[str, Any] = 'optimizer' lowerCamelCase : str = 'scheduler' lowerCamelCase : int = 'pytorch_model.bin' lowerCamelCase : Optional[Any] = 'pytorch_model.bin.index.json' lowerCamelCase : List[Any] = 'model.safetensors' lowerCamelCase : Any = 'model.safetensors.index.json' lowerCamelCase : str = '1.10.2' lowerCamelCase : List[str] = 'py38' lowerCamelCase : List[Any] = '4.17.0' lowerCamelCase : Union[str, Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] lowerCamelCase : Optional[int] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] lowerCamelCase : Any = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] lowerCamelCase : Tuple = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] lowerCamelCase : Tuple = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] lowerCamelCase : Optional[int] = '2.0.1' lowerCamelCase : str = ['pdsh', 'standard', 'openmpi', 'mvapich'] lowerCamelCase : str = ['default', 'reduce-overhead', 'max-autotune'] lowerCamelCase : Optional[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCamelCase : List[Any] = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] lowerCamelCase : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] lowerCamelCase : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
2
'''simple docstring''' from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = Dict[str, Any] UpperCamelCase = List[Prediction] @add_end_docstrings(UpperCAmelCase_ ) class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[str] ) -> int: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _snake_case ( self : int , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' A: Any = {} if "threshold" in kwargs: A: List[Any] = kwargs['''threshold'''] return {}, {}, postprocess_kwargs def __call__( self : str , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Union[Predictions, List[Prediction]]: '''simple docstring''' return super().__call__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' A: int = load_image(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = torch.IntTensor([[image.height, image.width]] ) A: Union[str, Any] = self.image_processor(images=[image] , return_tensors='''pt''' ) if self.tokenizer is not None: A: int = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' ) A: Any = target_size return inputs def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: '''simple docstring''' A: Tuple = model_inputs.pop('''target_size''' ) A: Tuple = self.model(**SCREAMING_SNAKE_CASE_ ) A: List[str] = outputs.__class__({'''target_size''': target_size, **outputs} ) if self.tokenizer is not None: A: Dict = model_inputs['''bbox'''] return model_outputs def _snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=0.9 ) -> Union[str, Any]: '''simple docstring''' A: List[Any] = model_outputs['''target_size'''] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A , A: Union[str, Any] = target_size[0].tolist() def unnormalize(SCREAMING_SNAKE_CASE_ : str ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 10_00), (height * bbox[1] / 10_00), (width * bbox[2] / 10_00), (height * bbox[3] / 10_00), ] ) ) A , A: Dict = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A: List[str] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A: List[str] = [unnormalize(SCREAMING_SNAKE_CASE_ ) for bbox in model_outputs['''bbox'''].squeeze(0 )] A: Dict = ['''score''', '''label''', '''box'''] A: Optional[int] = [dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A: Any = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: List[str] = raw_annotations[0] A: List[Any] = raw_annotation['''scores'''] A: List[Any] = raw_annotation['''labels'''] A: int = raw_annotation['''boxes'''] A: Any = scores.tolist() A: List[Any] = [self.model.config.idalabel[label.item()] for label in labels] A: List[Any] = [self._get_bounding_box(SCREAMING_SNAKE_CASE_ ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A: Tuple = ['''score''', '''label''', '''box'''] A: str = [ dict(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] ) ] return annotation def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : "torch.Tensor" ) -> Dict[str, int]: '''simple docstring''' if self.framework != "pt": raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' ) A , A , A , A: str = box.int().tolist() A: str = { '''xmin''': xmin, '''ymin''': ymin, '''xmax''': xmax, '''ymax''': ymax, } return bbox
319
0
'''simple docstring''' from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
3
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''', '''YituTech/conv-bert-medium-small''': ( '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json''' ), '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = """convbert""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Dict=3_05_22 , SCREAMING_SNAKE_CASE_ : int=7_68 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : List[str]=12 , SCREAMING_SNAKE_CASE_ : Dict=30_72 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=5_12 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : int=1E-12 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : int=0 , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=7_68 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Any=9 , SCREAMING_SNAKE_CASE_ : Tuple=1 , SCREAMING_SNAKE_CASE_ : List[Any]=None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> List[Any]: '''simple docstring''' super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Dict = vocab_size A: Tuple = hidden_size A: Optional[int] = num_hidden_layers A: List[str] = num_attention_heads A: int = intermediate_size A: int = hidden_act A: List[str] = hidden_dropout_prob A: int = attention_probs_dropout_prob A: Tuple = max_position_embeddings A: Any = type_vocab_size A: str = initializer_range A: Union[str, Any] = layer_norm_eps A: str = embedding_size A: Optional[int] = head_ratio A: List[Any] = conv_kernel_size A: List[Any] = num_groups A: Optional[int] = classifier_dropout class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' @property def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A: List[str] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
319
0
"""Project Euler Problem 89: for each Roman numeral in the input file,
compute how many characters are saved by rewriting it in minimal form."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral (possibly non-minimal) to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # A smaller symbol before a larger one is subtractive (e.g. IV = 4).
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral for an integer."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total characters saved across all numerals in the input file."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
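# Worked example for the two converters above: the non-minimal numeral
# "XXXXVIIII" (49) shrinks to "XLIX", saving five characters.
assert parse_roman_numerals("XXXXVIIII") == 49
assert generate_roman_numerals(49) == "XLIX"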
4
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a polygon in the
    Euclidean plane, i.e. the longest side is strictly shorter than the
    sum of all the other sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
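# Usage sketch: the longest side must be strictly shorter than the rest combined.
assert check_polygon([6, 10, 5])  # 10 < 6 + 5
assert not check_polygon([3, 7, 13, 2])  # 13 >= 3 + 7 + 2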
319
0
from __future__ import annotations UpperCAmelCase__ = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> tuple[list[list[int]], list[list[int]]]: """simple docstring""" _lowercase =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the reference grid _lowercase =1 _lowercase =[ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the action grid _lowercase =init[0] _lowercase =init[1] _lowercase =0 _lowercase =g + heuristic[x][y] # cost from starting cell to destination cell _lowercase =[[f, g, x, y]] _lowercase =False # flag that is set when search is complete _lowercase =False # flag set if we can't find expand while not found and not resign: if len(__snake_case ) == 0: raise ValueError('''Algorithm is unable to find solution''' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() _lowercase =cell.pop() _lowercase =next_cell[2] _lowercase =next_cell[3] _lowercase =next_cell[1] if x == goal[0] and y == goal[1]: _lowercase =True else: for i in range(len(__snake_case ) ): # to try out different valid actions _lowercase =x + DIRECTIONS[i][0] _lowercase =y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__snake_case ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: _lowercase =g + cost _lowercase =ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) _lowercase =1 _lowercase =i _lowercase =[] _lowercase =goal[0] _lowercase =goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: _lowercase =x - DIRECTIONS[action[x][y]][0] _lowercase =y - DIRECTIONS[action[x][y]][1] _lowercase =xa _lowercase =ya invpath.append([x, y] ) _lowercase =[] for i in range(len(__snake_case ) ): path.append(invpath[len(__snake_case ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase__ = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase__ = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase__ = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase__ = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase__ = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase__ = 99 UpperCAmelCase__ ,UpperCAmelCase__ = search(grid, init, goal, cost, heuristic) print('''ACTION MAP''') for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
5
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exists with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
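# With the IGNORE_RESULT flag registered above, a doctest can opt out of
# output comparison for a single example (hypothetical function, for illustration):
def _flaky_output_example():
    """
    >>> _flaky_output_example()  # doctest: +IGNORE_RESULT
    anything-goes-here
    """
    print("timestamp-dependent output")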
319
0
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE y' = f(x, y) with the modified Euler (Heun) method:
    take an explicit Euler predictor step, then average the slopes at
    both ends of the interval for the corrector."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: plain Euler estimate of y at x + h.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both ends.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
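# Usage sketch: integrate y' = y from x = 0 to 1 with y(0) = 1; the exact
# value is e ≈ 2.71828 and Heun's method with h = 0.1 lands close by.
import math

approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1]
print(approx, math.e)  # ~2.714 vs 2.71828...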
6
'''simple docstring''' import heapq import sys import numpy as np UpperCamelCase = tuple[int, int] class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ) -> str: '''simple docstring''' A: Any = [] A: int = set() def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('''inf''' ) def _snake_case ( self : List[str] ) -> List[Any]: '''simple docstring''' return len(self.elements ) == 0 def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(SCREAMING_SNAKE_CASE_ ) else: # update # print("update", item) A: Optional[int] = [] ((A) , (A)): str = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((A) , (A)): int = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any: '''simple docstring''' if item in self.set: self.set.remove(SCREAMING_SNAKE_CASE_ ) A: str = [] ((A) , (A)): List[str] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((A) , (A)): Any = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _snake_case ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return self.elements[0][1] def _snake_case ( self : int ) -> Union[str, Any]: '''simple docstring''' ((A) , (A)): Dict = heapq.heappop(self.elements ) self.set.remove(SCREAMING_SNAKE_CASE_ ) return (priority, item) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: # euclidean distance A: List[str] = np.array(__lowercase ) A: Optional[int] = np.array(__lowercase ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int: # integer division by time variable return consistent_heuristic(__lowercase , __lowercase ) // t def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]: A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase ) return ans def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> Optional[int]: A: Union[str, Any] = np.chararray((n, n) ) for i in range(__lowercase ): for j in range(__lowercase ): A: Union[str, Any] = '''*''' for i in range(__lowercase ): for j in range(__lowercase ): if (j, (n - 1) - i) in blocks: A: Optional[Any] = '''#''' A: Tuple = '''-''' A: List[str] = back_pointer[goal] while x != start: ((A) , (A)): Tuple = x # print(x) A: List[str] = '''-''' A: str = back_pointer[x] A: Dict = '''-''' for i in range(__lowercase ): for j in range(__lowercase ): if (i, j) == (0, n - 1): print(grid[i][j] , end=''' ''' ) print('''<-- End position''' , end=''' ''' ) else: print(grid[i][j] , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) print('''PATH TAKEN BY THE ALGORITHM IS:-''' ) A: List[str] = back_pointer[goal] while x != start: print(__lowercase , end=''' ''' ) A: Optional[int] = back_pointer[x] print(__lowercase ) sys.exit() def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]: if p[0] < 0 or p[0] > n - 1: return 
False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]: for itera in range(__lowercase ): open_list[itera].remove_element(__lowercase ) # print("s", s) # print("j", j) ((A) , (A)): Tuple = s A: Optional[Any] = (x - 1, y) A: str = (x + 1, y) A: List[Any] = (x, y + 1) A: int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(__lowercase ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(__lowercase ) A: int = -1 A: int = float('''inf''' ) if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1: A: List[str] = g_function[s] + 1 A: List[str] = s if neighbours not in close_list_anchor: open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) ) if neighbours not in close_list_inad: for var in range(1 , __lowercase ): if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key( __lowercase , 0 , __lowercase , __lowercase ): open_list[j].put( __lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( ) -> Tuple: A: str = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(1_5 , 2_0 ): some_list.append((x, 1_7) ) for x in range(1_0 , 1_9 ): for y in range(1 , 1_5 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(1_2 , 1_9 ): some_list.append((x, y) ) for x in range(3 , 1_3 ): for y in range(1_6 , 1_9 ): some_list.append((x, y) ) return some_list UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase = make_common_ground() UpperCamelCase = blocks_blk # hyper parameters UpperCamelCase = 1 UpperCamelCase = 1 UpperCamelCase = 20 UpperCamelCase = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase = (0, 0) UpperCamelCase = (n - 1, n - 1) UpperCamelCase = 1 def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int: A: int = {start: 0, goal: float('''inf''' )} A: Union[str, Any] = {start: -1, goal: -1} A: List[Any] = [] A: Union[str, Any] = set() for i in range(__lowercase ): open_list.append(PriorityQueue() ) open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) ) A: list[int] = [] A: list[int] = [] while open_list[0].minkey() < float('''inf''' ): for i in range(1 , __lowercase ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A , A: Union[str, Any] = open_list[i].top_show() visited.add(__lowercase ) expand_state( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) close_list_inad.append(__lowercase ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('''inf''' ): do_something(__lowercase , __lowercase , __lowercase ) else: A: Union[str, Any] = open_list[0].top_show() visited.add(__lowercase ) expand_state( __lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase 
, __lowercase , ) close_list_anchor.append(__lowercase ) print('''No path found to goal''' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(__lowercase ): if (j, i) in blocks: print('''#''' , end=''' ''' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('''*''' , end=''' ''' ) else: print('''-''' , end=''' ''' ) else: print('''*''' , end=''' ''' ) if (j, i) == (n - 1, n - 1): print('''<-- End position''' , end=''' ''' ) print() print('''^''' ) print('''Start position''' ) print() print('''# is an obstacle''' ) print('''- is the path taken by algorithm''' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
319
0
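The multi-heuristic A* row above ranks states with a priority of the form key(s, i) = g(s) + W1 * h_i(s), mixing one consistent heuristic (euclidean distance) with two inadmissible ones. A minimal standalone sketch of that key computation (illustrative names, not the row's own functions):

import numpy as np

W1 = 1.0  # heuristic weight, mirroring the Wa hyperparameter in the row above

def consistent_heuristic(p, goal):
    # euclidean distance between grid cells
    return np.linalg.norm(np.array(p) - np.array(goal))

def manhattan(p, goal):
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

def key(state, heuristic, g_function, goal):
    # priority used when pushing `state` onto a heuristic's open list
    return g_function[state] + W1 * heuristic(state, goal)

g = {(0, 0): 0, (1, 0): 1}
print(key((1, 0), consistent_heuristic, g, (19, 19)))  # 1 + sqrt(18**2 + 19**2) ~ 27.17
print(key((1, 0), manhattan, g, (19, 19)))             # 1 + 37 = 38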
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    """A Bezier curve: a weighted sum of a set of control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # Bernstein basis polynomial for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
7
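A quick check of the BezierCurve class restored above: for a degree-2 curve the Bernstein basis at t = 0.5 is [0.25, 0.5, 0.25], so the curve point is the correspondingly weighted average of the control points.

curve = BezierCurve([(0, 0), (5, 5), (5, 0)])  # degree 2
print(curve.basis_function(0.5))         # [0.25, 0.5, 0.25]
print(curve.bezier_curve_function(0.5))  # (3.75, 2.5)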
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the denominator d <= `digit` for which the unit fraction
    numerator/d has the longest recurring cycle in its decimal expansion
    (Project Euler style). The cycle is detected by tracking remainders of
    the long division until one repeats.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # remainder repeats: cycle length is the number of distinct remainders seen
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
0
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, e.g. 27 -> True, 4 -> False."""
    # Round the floating-point cube root before cubing: n ** (1 / 3) is not
    # exact (27 ** (1 / 3) == 3.0000000000000004), so comparing without
    # rounding would wrongly reject genuine cubes.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
8
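Why the rounding in `perfect_cube` matters: a minimal demonstration of the floating-point pitfall the fix above avoids.

val = 27 ** (1 / 3)
print(val)                    # 3.0000000000000004, not exactly 3.0
print(val * val * val == 27)  # False without rounding
print(round(val) ** 3 == 27)  # True with rounding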
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
319
0
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
9
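A quick sanity check for the sieve above, comparing it against naive trial division on a small range (a minimal sketch; `is_prime` is a throwaway helper, not part of the row):

def is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert prime_sieve_eratosthenes(30) == [n for n in range(2, 31) if is_prime(n)]
print(prime_sieve_eratosthenes(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]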
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE between a file of predictions and a file of references;
    extra kwargs are passed through to calculate_rouge. The reference list is
    truncated to the number of predictions. (indent=None when saving is an
    assumption; the original value was obfuscated.)"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
319
0
from typing import Any def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list: """simple docstring""" _validation( __a , __a , __a , __a , __a , ) # Creates data structures and fill initial step lowerCamelCase__: dict ={} lowerCamelCase__: dict ={} for state in states_space: lowerCamelCase__: Optional[Any] =observations_space[0] lowerCamelCase__: List[Any] =( initial_probabilities[state] * emission_probabilities[state][observation] ) lowerCamelCase__: int =None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(__a ) ): lowerCamelCase__: Tuple =observations_space[o] lowerCamelCase__: Optional[Any] =observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function lowerCamelCase__: Tuple ="" lowerCamelCase__: Optional[Any] =-1 for k_state in states_space: lowerCamelCase__: int =( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: lowerCamelCase__: List[str] =probability lowerCamelCase__: int =k_state # Update probabilities and pointers dicts lowerCamelCase__: Any =( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) lowerCamelCase__: int =arg_max # The final observation lowerCamelCase__: Any =observations_space[len(__a ) - 1] # argmax for given final observation lowerCamelCase__: Optional[Any] ="" lowerCamelCase__: int =-1 for k_state in states_space: lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)] if probability > max_probability: lowerCamelCase__: List[Any] =probability lowerCamelCase__: Dict =k_state lowerCamelCase__: str =arg_max # Process pointers backwards lowerCamelCase__: Union[str, Any] =last_state lowerCamelCase__: List[str] =[] for o in range(len(__a ) - 1 , -1 , -1 ): result.append(__a ) lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]] result.reverse() return result def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None: """simple docstring""" _validate_not_empty( __a , __a , __a , __a , __a , ) _validate_lists(__a , __a ) _validate_dicts( __a , __a , __a ) def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None: """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("There's an empty parameter" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" _validate_list(__a , "observations_space" ) _validate_list(__a , "states_space" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" if not isinstance(_object , __a ): lowerCamelCase__: Tuple =F"""{var_name} must be a list""" raise ValueError(__a ) else: for x in _object: if not isinstance(__a , __a ): lowerCamelCase__: str =F"""{var_name} must be a list of strings""" raise ValueError(__a ) def lowerCAmelCase_ ( __a , __a , __a , ) -> None: """simple docstring""" _validate_dict(__a , "initial_probabilities" , __a ) _validate_nested_dict(__a , "transition_probabilities" ) _validate_nested_dict(__a , "emission_probabilities" ) def lowerCAmelCase_ ( __a , __a ) -> None: """simple docstring""" _validate_dict(_object , __a , __a ) for x in _object.values(): _validate_dict(__a , __a , __a , __a ) def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None: """simple docstring""" if not isinstance(_object , __a ): 
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict""" raise ValueError(__a ) if not all(isinstance(__a , __a ) for x in _object ): lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings""" raise ValueError(__a ) if not all(isinstance(__a , __a ) for x in _object.values() ): lowerCamelCase__: Dict ="nested dictionary " if nested else "" lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}""" raise ValueError(__a ) if __name__ == "__main__": from doctest import testmod testmod()
10
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the largest element of the
    unsorted prefix to its final position; the `swapped` flag stops early on
    an already-sorted list."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
319
0
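Usage of the recursive bubble sort above; each recursive call shrinks the unsorted prefix by one.

print(bubble_sort([5, 1, 4, 2, 8]))  # [1, 2, 4, 5, 8]
print(bubble_sort([1, 2, 3]))        # already sorted: one pass, no recursion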
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase): '''simple docstring''' __SCREAMING_SNAKE_CASE = VQModel __SCREAMING_SNAKE_CASE = "sample" @property def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]: _A : Optional[int] = 4 _A : Tuple = 3 _A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase) return {"sample": image} @property def _lowerCamelCase ( self) -> int: return (3, 3_2, 3_2) @property def _lowerCamelCase ( self) -> List[Any]: return (3, 3_2, 3_2) def _lowerCamelCase ( self) -> Union[str, Any]: _A : List[Any] = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _A : int = self.dummy_input return init_dict, inputs_dict def _lowerCamelCase ( self) -> Union[str, Any]: pass def _lowerCamelCase ( self) -> Any: pass def _lowerCamelCase ( self) -> Any: _A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) self.assertEqual(len(loading_info["missing_keys"]) , 0) model.to(__lowerCamelCase) _A : str = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def _lowerCamelCase ( self) -> Union[str, Any]: _A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(__lowerCamelCase).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) _A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size) _A : Optional[int] = image.to(__lowerCamelCase) with torch.no_grad(): _A : List[str] = model(__lowerCamelCase).sample _A : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3]) # fmt: on self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
11
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] UpperCamelCase = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def SCREAMING_SNAKE_CASE( __lowercase ) -> List[Any]: A: List[Any] = torch.load(__lowercase , map_location='''cpu''' ) return sd def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase=rename_keys_prefix ) -> Optional[Any]: A: Tuple = OrderedDict() A: Dict = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue A: int = key for name_pair in rename_keys_prefix: A: Optional[int] = new_key.replace(name_pair[0] , name_pair[1] ) A: Union[str, Any] = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately A: int = new_d['''cls.predictions.bias'''] return new_d @torch.no_grad() def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Dict: assert ( checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS ), F"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.""" # Get Config if "pre" in checkpoint_path: A: Optional[Any] = '''pretraining''' if "vcr" in checkpoint_path: A: Optional[int] = {'''visual_embedding_dim''': 5_1_2} elif "vqa_advanced" in checkpoint_path: A: Optional[Any] = {'''visual_embedding_dim''': 2_0_4_8} elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8} elif "nlvr" in checkpoint_path: A: Tuple = {'''visual_embedding_dim''': 1_0_2_4} else: raise NotImplementedError(F"""No implementation found for `{checkpoint_path}`.""" ) else: if "vcr" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 5_1_2} A: List[str] = '''multichoice''' elif "vqa_advanced" in checkpoint_path: A: List[str] = {'''visual_embedding_dim''': 2_0_4_8} A: Optional[int] = '''vqa_advanced''' elif "vqa" in checkpoint_path: A: Dict = {'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9} A: Union[str, Any] = '''vqa''' elif "nlvr" in checkpoint_path: A: Optional[int] = { '''visual_embedding_dim''': 1_0_2_4, '''num_labels''': 2, } A: str = '''nlvr''' A: Union[str, Any] = VisualBertConfig(**__lowercase ) # Load State Dict A: Union[str, Any] = load_state_dict(__lowercase ) A: str = get_new_dict(__lowercase , __lowercase ) if model_type == "pretraining": A: Optional[Any] = VisualBertForPreTraining(__lowercase ) elif model_type == "vqa": A: Optional[Any] = VisualBertForQuestionAnswering(__lowercase ) elif model_type == "nlvr": A: Union[str, Any] = VisualBertForVisualReasoning(__lowercase ) elif model_type == "multichoice": A: Any = VisualBertForMultipleChoice(__lowercase ) 
model.load_state_dict(__lowercase ) # Save Checkpoints Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') UpperCamelCase = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
319
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { 'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : int = 'gpt_neo' UpperCAmelCase__ : Union[str, Any] = ['past_key_values'] UpperCAmelCase__ : int = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any]=5_02_57 , UpperCamelCase_: Any=20_48 , UpperCamelCase_: Optional[Any]=20_48 , UpperCamelCase_: Any=24 , UpperCamelCase_: int=[[["global", "local"], 12]] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: str=None , UpperCamelCase_: Any=2_56 , UpperCamelCase_: List[str]="gelu_new" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Optional[Any]=0.0 , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: List[Any]=1E-5 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: int=True , UpperCamelCase_: Optional[Any]=5_02_56 , UpperCamelCase_: Dict=5_02_56 , **UpperCamelCase_: int , ): __lowerCamelCase = vocab_size __lowerCamelCase = max_position_embeddings __lowerCamelCase = hidden_size __lowerCamelCase = num_layers __lowerCamelCase = num_heads __lowerCamelCase = intermediate_size __lowerCamelCase = window_size __lowerCamelCase = activation_function __lowerCamelCase = resid_dropout __lowerCamelCase = embed_dropout __lowerCamelCase = attention_dropout __lowerCamelCase = classifier_dropout __lowerCamelCase = layer_norm_epsilon __lowerCamelCase = initializer_range __lowerCamelCase = use_cache __lowerCamelCase = bos_token_id __lowerCamelCase = eos_token_id __lowerCamelCase = attention_types __lowerCamelCase = self.expand_attention_types_params(UpperCamelCase_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.attention_layers)` == `config.num_layers` """ F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' """`config.attention_layers` is prepared using `config.attention_types`. 
""" """Please verify the value of `config.attention_types` argument.""" ) super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) @staticmethod def lowerCAmelCase__ ( UpperCamelCase_: Tuple ): __lowerCamelCase = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def lowerCamelCase__ ( A__ : int , A__ : Optional[Any] , A__ : Optional[int] , A__ : List[Any] ): '''simple docstring''' import torch __lowerCamelCase = input.size() __lowerCamelCase = len(A__ ) __lowerCamelCase = shape[dimension] __lowerCamelCase = torch.arange(0 , A__ , A__ ) __lowerCamelCase = torch.div(sizedim - size , A__ , rounding_mode="""floor""" ) + 1 __lowerCamelCase = torch.arange(A__ ) + low_indices[:min_length][:, None] __lowerCamelCase = [slice(A__ )] * rank __lowerCamelCase = indices __lowerCamelCase = input[s] __lowerCamelCase = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(A__ ) def lowerCamelCase__ ( A__ : Optional[int] , A__ : List[str] ): '''simple docstring''' import torch __lowerCamelCase = torch.arange(1 , A__ ) __lowerCamelCase = torch.remainder(A__ , A__ ) __lowerCamelCase = remainders == 0 __lowerCamelCase = candidates[divisor_indices] __lowerCamelCase = torch.max(A__ ) return largest_divisor, torch.div(A__ , A__ , rounding_mode="""floor""" ) class lowerCamelCase__( __lowerCamelCase): @property def lowerCAmelCase__ ( self: Any ): __lowerCamelCase = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase_ , direction="""inputs""" ) __lowerCamelCase = {0: """batch""", 1: """past_sequence + sequence"""} else: __lowerCamelCase = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCAmelCase__ ( self: Dict ): return self._config.num_heads def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[TensorType] = None , ): __lowerCamelCase = super(UpperCamelCase_ , self ).generate_dummy_inputs( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ ) # We need to order the input in the way they appears in the forward() __lowerCamelCase = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __lowerCamelCase, __lowerCamelCase = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __lowerCamelCase = seqlen + 2 __lowerCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __lowerCamelCase = [ (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(self.num_layers ) ] __lowerCamelCase = common_inputs["""attention_mask"""] if self.use_past: __lowerCamelCase = ordered_inputs["""attention_mask"""].dtype __lowerCamelCase = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase__ ( self: Optional[Any] ): return 13
12
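How `expand_attention_types_params` in the GPT-Neo config above unrolls the compact `attention_types` spec into one entry per layer; a minimal standalone reimplementation for illustration, not the class method itself:

def expand_attention_types_params(attention_types):
    # [[["global", "local"], 12]] -> ["global", "local"] repeated 12 times = 24 layers
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions

layers = expand_attention_types_params([[["global", "local"], 12]])
print(len(layers))  # 24, matching num_layers in the default config
print(layers[:4])   # ['global', 'local', 'global', 'local']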
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property (Project Euler 43) for a
    0-9 pandigital number given as a tuple of digits: d2d3d4 divisible by 2,
    d3d4d5 by 3, d4d5d6 by 5, then 7, 11, 13, 17 for the remaining windows."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum of all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
319
0
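A worked instance of the substring-divisibility test above, using the known pandigital example 1406357289 from the problem statement:

num = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)  # digits of 1406357289
# d4=6 is even, 0+6+3 == 9 is divisible by 3, d6=5, 357 % 7 == 0,
# 572 % 11 == 0, 728 % 13 == 0, 289 % 17 == 0
print(is_substring_divisible(num))  # True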
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase : List[Any] = None lowerCAmelCase : str = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} lowerCAmelCase : Optional[Any] = { """vocab_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""", }, """tokenizer_file""": { """t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""", """t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""", """t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""", """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""", """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""", }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase : str = { """t5-small""": 512, """t5-base""": 512, """t5-large""": 512, """t5-3b""": 512, """t5-11b""": 512, } class __lowercase ( UpperCAmelCase_ ): """simple docstring""" _UpperCAmelCase : Any = VOCAB_FILES_NAMES _UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : str = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : Optional[int] = TaTokenizer _UpperCAmelCase : List[int] = [] def __init__( self : Any , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : List[Any]="<unk>" , lowerCAmelCase__ : str="<pad>" , lowerCAmelCase__ : List[Any]=100 , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : Tuple , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE_: Optional[int] = [F"<extra_id_{i}>" for i in range(lowerCAmelCase__)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens SCREAMING_SNAKE_CASE_: List[str] = len(set(filter(lambda lowerCAmelCase__: bool("extra_id_" in str(lowerCAmelCase__)) , lowerCAmelCase__))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids" " tokens") super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , extra_ids=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: Dict = vocab_file SCREAMING_SNAKE_CASE_: Dict = False if not self.vocab_file else True SCREAMING_SNAKE_CASE_: List[str] = extra_ids @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any): if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: SCREAMING_SNAKE_CASE_: List[str] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( "This tokenizer was incorrectly instantiated with a model max length of" F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" " behavior is kept to avoid breaking backwards compatibility when padding/encoding with" " `truncation is True`.\n- Be aware that you SHOULD NOT rely on" F" {pretrained_model_name_or_path} automatically truncating your input to" F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please" " instantiate this tokenizer with `model_max_length` set to your preferred value." , lowerCAmelCase__ , ) return max_model_length def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(lowerCAmelCase__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return SCREAMING_SNAKE_CASE_: Tuple = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file , lowerCAmelCase__) logger.info(F"Copy vocab file to {out_vocab_file}") return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: SCREAMING_SNAKE_CASE_: Optional[int] = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: List[Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def _SCREAMING_SNAKE_CASE ( self : List[str]): return list( set(filter(lambda lowerCAmelCase__: bool(re.search(R"<extra_id_\d+>" , lowerCAmelCase__)) is not None , self.additional_special_tokens))) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): return [self.convert_tokens_to_ids(lowerCAmelCase__) for token in self.get_sentinel_tokens()]
13
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} UpperCamelCase = { '''vocab_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json''' ), }, '''merges_file''': { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''', '''allenai/longformer-large-4096''': ( '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt''' ), }, } UpperCamelCase = { '''allenai/longformer-base-4096''': 4096, '''allenai/longformer-large-4096''': 4096, '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096, '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096, '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def SCREAMING_SNAKE_CASE( ) -> Dict: A: Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) A: Union[str, Any] = bs[:] A: List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 A: List[Any] = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[int]: A: Optional[Any] = set() A: Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) A: List[Any] = char return pairs class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : int = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str="replace" , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Any="</s>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : str="<unk>" , SCREAMING_SNAKE_CASE_ : 
Dict="<pad>" , SCREAMING_SNAKE_CASE_ : Dict="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> List[str]: '''simple docstring''' A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token A: int = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token A: Any = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token A: str = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A: Dict = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: A: str = json.load(SCREAMING_SNAKE_CASE_ ) A: str = {v: k for k, v in self.encoder.items()} A: Union[str, Any] = errors # how to handle errors in decoding A: Optional[int] = bytes_to_unicode() A: Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: A: int = merges_handle.read().split('''\n''' )[1:-1] A: str = [tuple(merge.split() ) for merge in bpe_merges] A: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Union[str, Any] = {} A: Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A: Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' return len(self.encoder ) def _snake_case ( self : Optional[Any] ) -> int: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if token in self.cache: return self.cache[token] A: str = tuple(SCREAMING_SNAKE_CASE_ ) A: str = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: A: Dict = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break A , A: Optional[Any] = bigram A: Tuple = [] A: List[Any] = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: A: 
Union[str, Any] = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A: int = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A: Optional[Any] = tuple(SCREAMING_SNAKE_CASE_ ) A: Any = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: A: Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE_ ) A: str = ''' '''.join(SCREAMING_SNAKE_CASE_ ) A: str = word return word def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A: Dict = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): A: Tuple = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Optional[int] = ''''''.join(SCREAMING_SNAKE_CASE_ ) A: Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Union[str, Any] = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) A: int = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) A: Any = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) A: Union[str, Any] = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: int = [self.cls_token_id] A: str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , 
SCREAMING_SNAKE_CASE_ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: Dict = [self.sep_token_id] A: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict=False , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int: '''simple docstring''' A: Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): A: List[Any] = ''' ''' + text return (text, kwargs)
319
0
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark the numpy and pure-Python implementations."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
14
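A worked example for the two distance functions above: the 3-4-5 triangle, on which the numpy and pure-Python paths should agree.

print(euclidean_distance([0, 0], [3, 4]))        # 5.0 (numpy path)
print(euclidean_distance_no_np([0, 0], [3, 4]))  # 5.0 (pure-Python path)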
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit
    from the absolute value of `num`. (The function name is reconstructed;
    the original was obfuscated.)"""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")

    num_str = str(abs(num))
    # One copy of the digit list per position, each with a different digit removed.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
319
0
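The remove-digit helper above enumerates every single-digit deletion and keeps the maximum; for 152 the candidates are 52, 12 and 15, so:

print(remove_digit(152))   # 52
print(remove_digit(6385))  # 685 (dropping the 3)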
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power P = S * pf. (Function names are reconstructed;
    the originals were obfuscated.)"""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sqrt(1 - pf**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
15
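A worked example for the power formulas above: with apparent power S = 100 VA and power factor 0.8, the power triangle gives P = 80 W and Q = 60 var.

print(real_power(100, 0.8))      # 80.0
print(reactive_power(100, 0.8))  # ~60.0 (floating-point rounding aside)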
'''simple docstring''' from __future__ import annotations import math def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if len(__lowercase ) != 2 or len(a[0] ) != 2 or len(__lowercase ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) A: str = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowercase ) ) ] def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[list, list, list, list]: if len(__lowercase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) A: Union[str, Any] = len(__lowercase ) A: str = matrix_length // 2 A: Optional[int] = [[a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase )] A: Optional[Any] = [ [a[i][j] for j in range(__lowercase , __lowercase )] for i in range(__lowercase , __lowercase ) ] A: Union[str, Any] = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase )] A: int = [[a[i][j] for j in range(__lowercase )] for i in range(__lowercase , __lowercase )] return top_left, top_right, bot_left, bot_right def SCREAMING_SNAKE_CASE( __lowercase ) -> tuple[int, int]: return len(__lowercase ), len(matrix[0] ) def SCREAMING_SNAKE_CASE( __lowercase ) -> None: print('''\n'''.join(str(__lowercase ) for line in matrix ) ) def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase ) == (2, 2): return default_matrix_multiplication(__lowercase , __lowercase ) A , A , A , A: Union[str, Any] = split_matrix(__lowercase ) A , A , A , A: List[Any] = split_matrix(__lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Any = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , __lowercase ) A: Optional[int] = actual_strassen(__lowercase , matrix_subtraction(__lowercase , __lowercase ) ) A: Tuple = actual_strassen(matrix_addition(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: Union[str, Any] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: List[str] = actual_strassen(matrix_subtraction(__lowercase , __lowercase ) , matrix_addition(__lowercase , __lowercase ) ) A: int = matrix_addition(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) A: Any = matrix_addition(__lowercase , __lowercase ) A: List[Any] = matrix_addition(__lowercase , __lowercase ) A: List[str] = matrix_subtraction(matrix_subtraction(matrix_addition(__lowercase , __lowercase ) , __lowercase ) , __lowercase ) # construct the new matrix from our 4 quadrants A: Union[str, Any] = [] for i in range(len(__lowercase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__lowercase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> list: if matrix_dimensions(__lowercase )[1] != 
matrix_dimensions(__lowercase )[0]: A: int = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(__lowercase ) A: str = matrix_dimensions(__lowercase ) A: str = matrix_dimensions(__lowercase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] A: Union[str, Any] = max(*__lowercase , *__lowercase ) A: Optional[int] = int(math.pow(2 , math.ceil(math.loga(__lowercase ) ) ) ) A: List[Any] = matrixa A: Tuple = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) A: Any = actual_strassen(__lowercase , __lowercase ) # Removing the additional zeros for i in range(0 , __lowercase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowercase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": UpperCamelCase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] UpperCamelCase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
319
0
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple: lowercase__ : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' lowercase__ : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' ) lowercase__ : Any = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ), ] ) lowercase__ : Dict = transform(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) return image def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]: if "visual_encoder" in key: lowercase__ : Any = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __lowerCamelCase ) if "blocks" in key: lowercase__ : List[str] = re.sub(r'''blocks''' , '''layers''' , __lowerCamelCase ) if "attn" in key: lowercase__ : List[str] = re.sub(r'''attn''' , '''self_attn''' , __lowerCamelCase ) if "norm1" in key: lowercase__ : Dict = re.sub(r'''norm1''' , '''layer_norm1''' , __lowerCamelCase ) if "norm2" in key: lowercase__ : Optional[Any] = re.sub(r'''norm2''' , '''layer_norm2''' , __lowerCamelCase ) if "encoder.norm" in key: lowercase__ : Union[str, Any] = re.sub(r'''encoder.norm''' , '''post_layernorm''' , __lowerCamelCase ) if "encoder.patch_embed.proj" in key: lowercase__ : int = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __lowerCamelCase ) if "encoder.pos_embed" in key: lowercase__ : int = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __lowerCamelCase ) if "encoder.cls_token" in key: lowercase__ : Dict = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , __lowerCamelCase ) if "self_attn" in key: lowercase__ : Optional[int] = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , __lowerCamelCase ) return key @torch.no_grad() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None ) -> Optional[int]: if config_path is not None: lowercase__ : Union[str, Any] = BlipConfig.from_pretrained(__lowerCamelCase ) else: lowercase__ : str = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) lowercase__ : Dict = BlipForConditionalGeneration(__lowerCamelCase ).eval() lowercase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' lowercase__ : Optional[int] = blip_decoder(pretrained=__lowerCamelCase , image_size=3_84 , vit='''base''' ) lowercase__ : Optional[int] = pt_model.eval() lowercase__ : Union[str, Any] = pt_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : Any = modified_state_dict.pop(__lowerCamelCase ) lowercase__ : Union[str, Any] = rename_key(__lowerCamelCase ) lowercase__ : Union[str, Any] = value hf_model.load_state_dict(__lowerCamelCase ) lowercase__ : int = 3_84 lowercase__ : Optional[int] = 
load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Restored: the call below passes args.checkpoint_path, so the parser must define it.
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original BLIP checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
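
# A minimal usage sketch (editor's addition, not part of the conversion script above):
# assuming the converter was run with --pytorch_dump_folder_path ./blip-base (a
# hypothetical path), the exported captioning checkpoint reloads through the standard
# transformers API; BlipImageProcessor stands in for the script's load_demo_image helper.
import requests
import torch
from PIL import Image
from transformers import BertTokenizer, BlipForConditionalGeneration, BlipImageProcessor

caption_model = BlipForConditionalGeneration.from_pretrained("./blip-base")
caption_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
processor = BlipImageProcessor(size={"height": 384, "width": 384})

demo_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
demo_image = Image.open(requests.get(demo_url, stream=True).raw).convert("RGB")
pixel_values = processor(demo_image, return_tensors="pt").pixel_values

with torch.no_grad():
    generated = caption_model.generate(pixel_values)
print(caption_tokenizer.decode(generated[0], skip_special_tokens=True))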
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Variance-exploding stochastic sampler from Karras et al. (2022): a churn step that
    temporarily raises the noise level, an Euler step, and an optional second-order
    (Heun) correction.
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # This scheduler applies no input scaling.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        # Explicit Langevin-like "churn": temporarily increase the noise level.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        # First-order (Euler) update from sigma_hat to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        # Second-order (Heun) correction using the derivative at sigma_prev.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
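
# For orientation (editor's addition): a hedged sketch of the sampling loop this
# scheduler expects -- churn, Euler step, then the Heun correction whenever
# sigma_prev != 0. `denoise` is a stand-in for a real denoising model; the class is
# assumed importable as diffusers' KarrasVeScheduler.
import torch
from diffusers import KarrasVeScheduler


def denoise(x: torch.Tensor, sigma: torch.Tensor) -> torch.Tensor:
    # Dummy denoiser: predicts zeros so the sketch stays self-contained.
    return torch.zeros_like(x)


scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1) temporarily add noise, 2) first-order step ...
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    step_output = scheduler.step(denoise(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    if sigma_prev != 0:
        # ... 3) second-order correction at the lower noise level
        step_output = scheduler.step_correct(
            denoise(step_output.prev_sample, sigma_prev),
            sigma_hat,
            sigma_prev,
            sample_hat,
            step_output.prev_sample,
            step_output.derivative,
        )
    sample = step_output.prev_sample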
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : """simple docstring""" def __init__( self : str, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str]=2, UpperCAmelCase__ : str=8, UpperCAmelCase__ : int=True, UpperCAmelCase__ : str=True, UpperCAmelCase__ : Optional[int]=True, UpperCAmelCase__ : Any=True, UpperCAmelCase__ : Tuple=9_9, UpperCAmelCase__ : Dict=1_6, UpperCAmelCase__ : Optional[int]=5, UpperCAmelCase__ : Any=2, UpperCAmelCase__ : int=3_6, UpperCAmelCase__ : List[Any]="gelu", UpperCAmelCase__ : str=0.0, UpperCAmelCase__ : Dict=0.0, UpperCAmelCase__ : List[str]=5_1_2, UpperCAmelCase__ : Optional[int]=1_6, UpperCAmelCase__ : Tuple=2, UpperCAmelCase__ : Tuple=0.02, UpperCAmelCase__ : int=3, UpperCAmelCase__ : Tuple=4, UpperCAmelCase__ : List[Any]=None, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def _lowercase ( self : str ): __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_token_type_ids: __lowercase = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __lowercase = ids_tensor([self.batch_size], self.num_choices ) __lowercase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Optional[Any] ): return MraConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=UpperCAmelCase__, initializer_range=self.initializer_range, ) def _lowercase ( self : Union[str, Any] ): __lowercase = self.get_config() __lowercase = 3_0_0 return config def _lowercase ( self : List[str] ): ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( 
__lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = self.prepare_config_and_inputs() __lowercase = True __lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __lowercase = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Dict ): __lowercase = MraModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__, token_type_ids=UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Tuple, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[Any], ): __lowercase = True __lowercase = MraModel(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, encoder_attention_mask=UpperCAmelCase__, ) __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, encoder_hidden_states=UpperCAmelCase__, ) __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Dict, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : str, UpperCAmelCase__ : int ): __lowercase = MraForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : str, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str, UpperCAmelCase__ : List[str] ): __lowercase = MraForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, start_positions=UpperCAmelCase__, end_positions=UpperCAmelCase__, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def _lowercase ( self : str, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple ): __lowercase = self.num_labels __lowercase = 
MraForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : int, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = MraForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : int, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple ): __lowercase = self.num_choices __lowercase = MraForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, token_type_ids=UpperCAmelCase__, labels=UpperCAmelCase__, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def _lowercase ( self : Union[str, Any] ): __lowercase = self.prepare_config_and_inputs() ( ( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) ,( __lowercase ) , ) = config_and_inputs __lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[int] = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __UpperCAmelCase : Any = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : List[Any] = () def _lowercase ( self : List[Any] ): __lowercase = MraModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, hidden_size=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() def _lowercase ( self : Union[str, Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __lowercase = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def 
_lowercase ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def _lowercase ( self : Any ): for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = MraModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @unittest.skip(reason="MRA does not output attentions" ) def _lowercase ( self : Union[str, Any] ): return @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Tuple ): __lowercase = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) __lowercase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): __lowercase = model(UpperCAmelCase__ )[0] __lowercase = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 ) ) @slow def _lowercase ( self : Union[str, Any] ): __lowercase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) __lowercase = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): __lowercase = model(UpperCAmelCase__ )[0] __lowercase = 5_0_2_6_5 __lowercase = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 ) ) @slow def _lowercase ( self : Tuple ): __lowercase = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) __lowercase = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): __lowercase = model(UpperCAmelCase__ )[0] __lowercase = 5_0_2_6_5 __lowercase = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], UpperCAmelCase__, atol=1E-4 ) )
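
# Editor's note, as a hedged illustration: the ids_tensor / random_attention_mask
# fixtures used throughout the tester above (imported from test_modeling_common)
# behave roughly like the stand-ins below -- random token ids, plus a mask that
# always attends to at least one position per row.
import torch


def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)


def random_attention_mask_sketch(shape):
    mask = ids_tensor_sketch(shape, vocab_size=2)
    mask[:, -1] = 1  # guarantee at least one attended token per batch row
    return mask


dummy_input_ids = ids_tensor_sketch([2, 8], vocab_size=99)
dummy_mask = random_attention_mask_sketch([2, 8])
print(dummy_input_ids.shape, dummy_mask.shape)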
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase_ )} , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) UpperCamelCase_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase_ : bool = field( default=UpperCAmelCase_ , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def _snake_case ( self : Tuple ) -> List[Any]: '''simple docstring''' if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( '''--config_overrides can\'t be used in combination with --config_name or --model_name_or_path''' ) @dataclass class lowerCAmelCase_ : '''simple docstring''' UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) UpperCamelCase_ : Optional[str] = field(default=UpperCAmelCase_ , metadata={"""help""": """The input training data file (a text file)."""} ) UpperCamelCase_ : Optional[str] = field( default=UpperCAmelCase_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) UpperCamelCase_ : Optional[str] = field( 
default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}
    )
    validation_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
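
# A minimal sketch (editor's addition) of what DataCollatorForWholeWordMask does with
# the per-example dicts the Trainer feeds it: it pads the batch and masks whole words
# (via BERT "##" continuations, or the chinese_ref indices added above) rather than
# independent subword tokens. Model name and sentence are illustrative only.
from transformers import AutoTokenizer, DataCollatorForWholeWordMask

wwm_tokenizer = AutoTokenizer.from_pretrained("bert-base-chinese")
wwm_collator = DataCollatorForWholeWordMask(tokenizer=wwm_tokenizer, mlm_probability=0.15)

examples = [{"input_ids": wwm_tokenizer("我喜欢自然语言处理")["input_ids"]}]
batch = wwm_collator(examples)
print(batch["input_ids"].shape, batch["labels"].shape)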
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class a__ : def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = parent SCREAMING_SNAKE_CASE_ : Any = 13 SCREAMING_SNAKE_CASE_ : List[str] = 7 SCREAMING_SNAKE_CASE_ : Dict = True SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = 99 SCREAMING_SNAKE_CASE_ : Tuple = 384 SCREAMING_SNAKE_CASE_ : Optional[Any] = 2 SCREAMING_SNAKE_CASE_ : Any = 4 SCREAMING_SNAKE_CASE_ : str = 37 SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu" SCREAMING_SNAKE_CASE_ : List[Any] = 0.1 SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1 SCREAMING_SNAKE_CASE_ : Dict = 512 SCREAMING_SNAKE_CASE_ : int = 16 SCREAMING_SNAKE_CASE_ : Optional[int] = 2 SCREAMING_SNAKE_CASE_ : Any = 0.02 SCREAMING_SNAKE_CASE_ : str = 3 SCREAMING_SNAKE_CASE_ : int = 4 SCREAMING_SNAKE_CASE_ : Dict = 128 SCREAMING_SNAKE_CASE_ : Any = 2 SCREAMING_SNAKE_CASE_ : Tuple = 9 SCREAMING_SNAKE_CASE_ : List[Any] = 1 SCREAMING_SNAKE_CASE_ : Any = None def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) SCREAMING_SNAKE_CASE_ : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size ) SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : str = None if self.use_labels: SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size ) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices ) SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def 
__UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask] SCREAMING_SNAKE_CASE_ : List[str] = model(_A ) SCREAMING_SNAKE_CASE_ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : List[Any] = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A ) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE_ : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE_ : int = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A ) SCREAMING_SNAKE_CASE_ : Tuple = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : str = model(_A ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A ) SCREAMING_SNAKE_CASE_ : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } SCREAMING_SNAKE_CASE_ : Any = model(_A ) 
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[Any] = config_and_inputs SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class a__ ( A__ , A__ , unittest.TestCase ): A = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A = False A = False A = False def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self ) SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A ) def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A ) def __UpperCamelCase ( self : str ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A ) def __UpperCamelCase ( self : List[str] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_A ) def __UpperCamelCase ( self : Optional[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_A ) @slow def __UpperCamelCase ( self : Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : Any = True if hasattr(_A,"use_cache" ): SCREAMING_SNAKE_CASE_ : List[Any] = True SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A ) SCREAMING_SNAKE_CASE_ 
: List[Any] = model_class(_A ) SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A,saved_model=_A ) SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" ) SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A ) SCREAMING_SNAKE_CASE_ : str = model(_A ) if self.is_encoder_decoder: SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"] SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"] else: SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"] SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"] self.assertEqual(len(_A ),_A ) SCREAMING_SNAKE_CASE_ : Any = getattr( self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_A ),_A ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],) self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],) @slow def __UpperCamelCase ( self : List[Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(_A ) def __UpperCamelCase ( self : Tuple ): """simple docstring""" SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : List[str] = True SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length ) SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A ) SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A ) def check_decoder_attentions_output(_A : Dict ): SCREAMING_SNAKE_CASE_ : int = len(_A ) self.assertEqual(out_len % 2,0 ) SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],) def check_encoder_attentions_output(_A : Tuple ): SCREAMING_SNAKE_CASE_ : int = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_A ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ : Optional[Any] = True SCREAMING_SNAKE_CASE_ : Optional[Any] = False SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A ) SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) ) SCREAMING_SNAKE_CASE_ : Tuple = len(_A ) self.assertEqual(config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) if self.is_encoder_decoder: SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A ) SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(config.output_hidden_states,_A ) check_decoder_attentions_output(_A ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : int = model_class(_A ) 
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) # Check attention is always last and order is fine SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Dict = model_class(_A ) SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) ) self.assertEqual(model.config.output_hidden_states,_A ) check_encoder_attentions_output(_A ) @require_tf class a__ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0] SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768] self.assertEqual(output.shape,_A ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant( [ [ [-0.03475493, -0.4686034, -0.30638832], [0.22637248, -0.26988646, -0.7423424], [0.10324868, -0.45013508, -0.58280784], ] ] ) tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
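
# Editor's sketch of the SavedModel round trip exercised by the test above: any Keras
# model written in SavedModel format can be reloaded with tf.keras.models.load_model,
# which is how the test checks that hidden states and attentions survive export. The
# toy embedding model below is illustrative only.
import os
import tempfile

import tensorflow as tf

toy = tf.keras.Sequential([tf.keras.layers.Embedding(input_dim=100, output_dim=8)])
toy(tf.constant([[1, 2, 3, 4, 5, 6]]))  # build the model once before saving

with tempfile.TemporaryDirectory() as tmpdirname:
    path = os.path.join(tmpdirname, "saved_model", "1")
    toy.save(path)  # no .h5 suffix -> TensorFlow SavedModel format
    reloaded = tf.keras.models.load_model(path)
    print(reloaded(tf.constant([[1, 2, 3, 4, 5, 6]])).shape)  # (1, 6, 8)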
'''simple docstring''' import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Any = WavaVecaPhonemeCTCTokenizer UpperCamelCase_ : Tuple = False def _snake_case ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() A: Optional[int] = ( '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ''' '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ''' '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ''' '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ''' '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ''' '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ''' '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ''' '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ''' '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ''' '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ''' '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ''' '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ''' '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4''' ).split(''' ''' ) A: Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) A: Dict = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''} A: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=False , SCREAMING_SNAKE_CASE_ : Any=20 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 ) -> Tuple[str, list]: '''simple docstring''' A: int = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ )) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] A: Optional[Any] = list(filter(lambda SCREAMING_SNAKE_CASE_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) if max_length is not None and len(SCREAMING_SNAKE_CASE_ ) > max_length: A: int = toks[:max_length] if min_length is not None and len(SCREAMING_SNAKE_CASE_ ) < min_length and len(SCREAMING_SNAKE_CASE_ ) > 0: while len(SCREAMING_SNAKE_CASE_ ) < min_length: A: Dict = toks + toks # toks_str = [t[1] for t in toks] A: Union[str, Any] = [t[0] for t in toks] # Ensure consistency A: List[str] = tokenizer.decode(SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) if " 
" not in output_txt and len(SCREAMING_SNAKE_CASE_ ) > 1: A: int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) ) if with_prefix_space: A: Tuple = ''' ''' + output_txt A: List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) return output_txt, output_ids def _snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE_ : int ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> Optional[Any]: '''simple docstring''' A: List[Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) # check adding a single token tokenizer.add_tokens('''xxx''' ) A: Any = tokenizer('''m xxx ɪ''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] ) A: Optional[int] = tokenizer('''m aaa ɪ ccc''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa A: str = tokenizer('''maɪ c''' , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , [3, 2_00] ) # mai should be <unk> (=3) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Any = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Any = '''Hello how are you''' A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) def _snake_case ( self : Tuple ) -> Dict: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: List[str] = '''Hello how are you''' A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Dict ) -> Optional[Any]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: Optional[Any] = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] A: List[str] = tokenizer.decode(sample_ids[0] ) A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) def _snake_case ( self : Any ) -> Optional[int]: '''simple docstring''' A: int = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: List[Any] = '''Hello how are you''' A: Optional[Any] = 
tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' ) def _snake_case ( self : List[str] ) -> int: '''simple docstring''' A: Optional[Any] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Optional[Any] = '''Hello how are you''' A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids ) def _snake_case ( self : Dict ) -> Any: '''simple docstring''' A: Optional[int] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off A: str = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter A: Tuple = tokenizer.decode(sample_ids[0] ) A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] ) # decode with no word_del_token filter A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] ) def _snake_case ( self : int ) -> List[str]: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Union[str, Any] = '''Hello how are you''' A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Any: '''simple docstring''' A: Dict = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) A: Any = '''Hello how are you''' A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ) A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] ) -> Optional[Any]: '''simple docstring''' A: List[str] = self.tokenizer_class.from_pretrained( '''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ ) A: List[Any] = '''Hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ ) A: Any = 
tokenizer.decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' ) self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' ) def _snake_case ( self : str ) -> str: '''simple docstring''' A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) A: str = '''Hello how Are you''' A: Union[str, Any] = '''hello how are you''' A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : int ) -> List[Any]: '''simple docstring''' A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' ) tokenizer.add_tokens(['''!''', '''?'''] ) tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} ) # fmt: off A: Tuple = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] ) @staticmethod def _snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Tuple: '''simple docstring''' A: Any = [d[key] for d in offsets] return retrieved_list def _snake_case ( self : Any ) -> Tuple: '''simple docstring''' A: str = self.get_tokenizer(word_delimiter_token='''|''' ) tokenizer.add_tokens('''|''' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''char_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def _snake_case ( self : Any ) -> List[Any]: '''simple docstring''' A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' ) def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) ) # transform list to 
ModelOutput A: Dict = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] ) def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): [recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] ) # fmt: off A: int = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. All we will check now is # the output type is correct and the output is identical to `decode` # char A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids] check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' ) def _snake_case ( self : int ) -> int: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' ) def _snake_case ( self : str ) -> Any: '''simple docstring''' pass @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' ) def _snake_case ( self : List[str] ) -> List[str]: '''simple docstring''' pass @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' ) def _snake_case ( self : Dict ) -> List[Any]: '''simple docstring''' pass def _snake_case ( self : Tuple ) -> Any: '''simple docstring''' A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: str = tokenizer.vocab_size A: str = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) ) A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} A: int = 
tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ ) A: Optional[Any] = tokenizer.vocab_size A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) ) A: int = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' ) def _snake_case ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass def _snake_case ( self : str ) -> Tuple: '''simple docstring''' A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t'''] A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
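# --- Usage sketch (added for illustration, not part of the original test file) ---
# Mirrors the behaviour the tests above assert; assumes the `phonemizer`
# backend is installed and the Hub checkpoint is reachable.
#
#   from transformers import Wav2Vec2PhonemeCTCTokenizer
#
#   tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
#   tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
#   # -> "h ə l oʊ h aʊ ɑːɹ j uː"
#   tokenizer.decode(tokenizer("Hello how are you").input_ids)
#   # -> the same phoneme string (see test_encode_decode above)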
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # make sure the cache directory exists before writing the default file into it
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
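# --- Usage sketch (added for illustration) ---
# This module backs the `accelerate config` subcommand. A typical invocation:
#
#   accelerate config --config_file ./my_config.yaml
#
# walks through the interactive prompts and writes the answers to the given
# path; without --config_file the answers go to default_yaml_config_file in
# the Accelerate cache directory. A path ending in ".json" is saved as JSON.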
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal string to its binary representation, returned as
    an ``int`` whose decimal digits are the binary digits (e.g. "AC" -> 10101100)."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    # the loop leaves bin_str empty for input "0"; guard against int("")
    if not bin_str:
        bin_str = "0"

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
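# --- Usage sketch (added for illustration) ---
# Hand-checked values: 0xAC == 172 == 0b10101100, 0x9A == 154 == 0b10011010.
if __name__ == "__main__":
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-9A") == -10011010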
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
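# --- Note (added for illustration) ---
# As the test asserts, `get_imports` reports only the unguarded top-level
# imports of a module file: `bar`/`baz` sit inside try/except blocks in every
# template above, so each case parses to ["os"]. A minimal direct call:
#
#   get_imports(path_to_module_file)  # -> ["os"] for each template above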